CR7CAD committed on
Commit 5b94bbe · verified · 1 Parent(s): c2290eb

Update app.py

Files changed (1)
  1. app.py +264 -291
app.py CHANGED
@@ -1,11 +1,6 @@
- import os
- import io
  import streamlit as st
- import docx
- import docx2txt
- import tempfile
- import time
- import re
  import pandas as pd
  from functools import lru_cache
@@ -17,24 +12,21 @@ except ImportError:
  from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForSeq2SeqLM
  import torch
  has_pipeline = False
- st.warning("Using basic transformers functionality instead of pipeline API")

- # Set page title and hide sidebar
  st.set_page_config(page_title="Resume-Job Fit Analyzer", initial_sidebar_state="collapsed")
- st.markdown("""<style>[data-testid="collapsedControl"] {display: none;}section[data-testid="stSidebar"] {display: none;}</style>""", unsafe_allow_html=True)

  #####################################
- # Preload Models & Helper Functions
  #####################################
- @st.cache_resource(show_spinner=True)
  def load_models():
- """Load models at startup"""
- with st.spinner("Loading AI models... This may take a minute on first run."):
  models = {}
-
  # Load summarization model
  if has_pipeline:
- models['summarizer'] = pipeline("summarization", model="Falconsai/text_summarization", max_length=100, truncation=True)
  else:
  try:
  models['summarizer_model'] = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/text_summarization")
@@ -53,453 +45,430 @@ def load_models():
  except Exception as e:
  st.error(f"Error loading sentiment model: {e}")
  models['evaluator_model'] = models['evaluator_tokenizer'] = None
-
  return models

  def summarize_text(text, models, max_length=100):
- """Summarize text using available models with fallbacks"""
- # Truncate input to prevent issues with long texts
  input_text = text[:1024]

- # Try pipeline first
  if has_pipeline and 'summarizer' in models:
  try:
  return models['summarizer'](input_text)[0]['summary_text']
- except Exception as e:
- st.warning(f"Error in pipeline summarization: {e}")

  # Try manual model
- if 'summarizer_model' in models and 'summarizer_tokenizer' in models and models['summarizer_model']:
  try:
  tokenizer = models['summarizer_tokenizer']
  model = models['summarizer_model']
  inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=1024)
- summary_ids = model.generate(inputs.input_ids, max_length=max_length, min_length=30, num_beams=4, early_stopping=True)
  return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
- except Exception as e:
- st.warning(f"Error in manual summarization: {e}")

- # Fallback to basic summarization
- return basic_summarize(text, max_length)
-
- def basic_summarize(text, max_length=100):
- """Basic extractive text summarization"""
  sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
-
- # Score and filter sentences
- scored_sentences = []
- for i, sentence in enumerate(sentences):
- if len(sentence.split()) >= 4:
- score = 1.0 / (i + 1) - (0.01 * max(0, len(sentence.split()) - 20))
- scored_sentences.append((score, sentence))
-
- # Get top sentences
- scored_sentences.sort(reverse=True)
- summary_sentences = []
- current_length = 0
-
- for _, sentence in scored_sentences:
- if current_length + len(sentence.split()) <= max_length:
- summary_sentences.append(sentence)
- current_length += len(sentence.split())
- else:
- break
-
- # Restore original sentence order
- if summary_sentences:
- original_order = [(sentences.index(s), s) for s in summary_sentences]
- original_order.sort()
- summary_sentences = [s for _, s in original_order]
-
- return " ".join(summary_sentences)

  #####################################
- # Information Extraction Functions
  #####################################
- @st.cache_data(show_spinner=False)
  def extract_text_from_file(file_obj):
- """Extract text from uploaded document file"""
- filename = file_obj.name
- ext = os.path.splitext(filename)[1].lower()

  if ext == ".docx":
  try:
  document = docx.Document(file_obj)
- text = "\n".join(para.text for para in document.paragraphs if para.text.strip())
  except Exception as e:
  return f"Error processing DOCX file: {e}"
  elif ext == ".doc":
  try:
  with tempfile.NamedTemporaryFile(delete=False, suffix='.doc') as temp_file:
  temp_file.write(file_obj.getvalue())
- temp_path = temp_file.name
-
- text = docx2txt.process(temp_path)
- os.unlink(temp_path)
  except Exception as e:
  return f"Error processing DOC file: {e}"
  elif ext == ".txt":
  try:
- text = file_obj.getvalue().decode("utf-8")
  except Exception as e:
  return f"Error processing TXT file: {e}"
  else:
  return "Unsupported file type. Please upload a .docx, .doc, or .txt file."
-
- return text[:15000] if text else text
  def extract_skills(text):
- """Extract key skills from the resume"""
  skill_keywords = {
- "Programming": ["Python", "Java", "JavaScript", "HTML", "CSS", "SQL", "C++", "C#", "Go", "React", "Angular", "Vue", "Node.js"],
- "Data Science": ["Machine Learning", "Data Analysis", "Statistics", "TensorFlow", "PyTorch", "AI", "Algorithms", "NLP", "Deep Learning"],
- "Database": ["SQL", "MySQL", "MongoDB", "Database", "NoSQL", "PostgreSQL", "Oracle", "Redis"],
- "Web Development": ["React", "Angular", "Node.js", "Frontend", "Backend", "Full-Stack", "REST API", "GraphQL"],
- "Software Development": ["Agile", "Scrum", "Git", "DevOps", "Docker", "System Design", "CI/CD", "Jenkins"],
- "Cloud": ["AWS", "Azure", "Google Cloud", "Cloud Computing", "Lambda", "S3", "EC2"],
- "Security": ["Cybersecurity", "Network Security", "Encryption", "Security"],
- "Business": ["Project Management", "Business Analysis", "Leadership", "Teamwork", "Agile", "Scrum"],
- "Design": ["UX/UI", "User Experience", "Design Thinking", "Adobe", "Figma"]
  }

  text_lower = text.lower()
- return [skill for category, skills in skill_keywords.items()
- for skill in skills if skill.lower() in text_lower]

  @lru_cache(maxsize=32)
  def extract_name(text_start):
- """Extract candidate name from the beginning of resume text"""
- lines = text_start.split('\n')
- potential_name_lines = [line.strip() for line in lines[:5] if line.strip()]

- if potential_name_lines:
- first_line = potential_name_lines[0]
- if 5 <= len(first_line) <= 40 and not any(x in first_line.lower() for x in ["resume", "cv", "curriculum", "vitae", "profile"]):
  return first_line

- for line in potential_name_lines[:3]:
  if len(line.split()) <= 4 and not any(x in line.lower() for x in ["address", "phone", "email", "resume", "cv"]):
  return line
-
- return "Unknown (please extract from resume)"

  def extract_age(text):
- """Extract candidate age from resume text"""
- age_patterns = [
- r'age:?\s*(\d{1,2})',
- r'(\d{1,2})\s*years\s*old',
- r'dob:.*(\d{4})',
- r'date of birth:.*(\d{4})'
- ]
-
- text_lower = text.lower()
- for pattern in age_patterns:
- matches = re.search(pattern, text_lower)
- if matches:
- # Convert birth year to age if needed
- if len(matches.group(1)) == 4:
- try:
- return str(2025 - int(matches.group(1)))
- except:
- pass
- return matches.group(1)
-
  return "Not specified"

  def extract_industry(text):
- """Extract expected job industry from resume"""
- industry_keywords = {
  "Technology": ["software", "programming", "developer", "IT", "tech", "computer", "digital"],
  "Finance": ["banking", "financial", "accounting", "finance", "analyst"],
- "Healthcare": ["medical", "health", "hospital", "clinical", "nurse", "doctor", "patient"],
- "Education": ["teaching", "teacher", "professor", "education", "university", "school", "academic"],
  "Marketing": ["marketing", "advertising", "digital marketing", "social media", "brand"],
  "Engineering": ["engineer", "engineering", "mechanical", "civil", "electrical"],
  "Data Science": ["data science", "machine learning", "AI", "analytics", "big data"],
- "Management": ["manager", "management", "leadership", "executive", "director"],
- "Consulting": ["consultant", "consulting", "advisor"],
- "Sales": ["sales", "business development", "account manager", "client relations"]
  }

  text_lower = text.lower()
- industry_counts = {industry: sum(text_lower.count(keyword.lower()) for keyword in keywords)
- for industry, keywords in industry_keywords.items()}
-
- return max(industry_counts.items(), key=lambda x: x[1])[0] if any(industry_counts.values()) else "Not clearly specified"

  def extract_job_position(text):
- """Extract expected job position from resume"""
- objective_patterns = [
- r'objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
- r'career\s*objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
- r'professional\s*summary:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
- r'summary:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
- r'seeking\s*(?:a|an)?\s*(?:position|role|opportunity)\s*(?:as|in)?\s*(?:a|an)?\s*([^.]*)'
- ]
-
  text_lower = text.lower()
- for pattern in objective_patterns:
  match = re.search(pattern, text_lower, re.IGNORECASE | re.DOTALL)
  if match:
- objective_text = match.group(1).strip()
- job_titles = ["developer", "engineer", "analyst", "manager", "director", "specialist",
- "coordinator", "consultant", "designer", "architect", "administrator"]
-
- for title in job_titles:
- if title in objective_text:
- title_pattern = r'(?:a|an)?\s*(\w+\s+' + title + r'|\w+\s+\w+\s+' + title + r')'
- title_match = re.search(title_pattern, objective_text)
- if title_match:
- return title_match.group(1).strip().title()
- return title.title()
-
- if len(objective_text) > 10:
- words = objective_text.split()
- return " ".join(words[:10]).title() + "..." if len(words) > 10 else objective_text.title()
-
- job_patterns = [
- r'experience:.*?(\w+\s+\w+(?:\s+\w+)?)(?=\s*at|\s*\(|\s*-|\s*,|\s*\d{4}|\n)',
- r'(\w+\s+\w+(?:\s+\w+)?)\s*\(\s*current\s*\)',
- r'(\w+\s+\w+(?:\s+\w+)?)\s*\(\s*present\s*\)'
- ]
-
- for pattern in job_patterns:
  match = re.search(pattern, text_lower, re.IGNORECASE)
- if match:
- return match.group(1).strip().title()

- return "Not explicitly stated"
  #####################################
  # Core Analysis Functions
  #####################################
  def summarize_resume_text(resume_text, models):
- """Generate a structured summary of resume text"""
- start_time = time.time()

- # Extract critical information
  name = extract_name(resume_text[:500])
  age = extract_age(resume_text)
  industry = extract_industry(resume_text)
  job_position = extract_job_position(resume_text)
  skills = extract_skills(resume_text)

- # Generate overall summary
  try:
  if has_pipeline and 'summarizer' in models:
- model_summary = models['summarizer'](resume_text[:2000], max_length=100, min_length=30, do_sample=False)[0]['summary_text']
  else:
  model_summary = summarize_text(resume_text, models, max_length=100)
- except Exception as e:
- st.warning(f"Error in resume summarization: {e}")
- model_summary = "Error generating summary. Please check the original resume."
-
- # Format the structured summary
- formatted_summary = f"Name: {name}\n\n"
- formatted_summary += f"Age: {age}\n\n"
- formatted_summary += f"Expected Industry: {industry}\n\n"
- formatted_summary += f"Expected Job Position: {job_position}\n\n"
- formatted_summary += f"Skills: {', '.join(skills)}\n\n"
- formatted_summary += f"Summary: {model_summary}"
-
- return formatted_summary, time.time() - start_time

  def extract_job_requirements(job_description, models):
- """Extract key requirements from a job description"""
- # Combined skill list (abridged for brevity)
  tech_skills = [
- "Python", "Java", "C++", "JavaScript", "TypeScript", "SQL", "HTML", "CSS", "React", "Angular",
- "Machine Learning", "Data Science", "AI", "AWS", "Azure", "Docker", "Kubernetes", "MySQL",
- "MongoDB", "PostgreSQL", "Project Management", "Agile", "Scrum", "Leadership", "Communication",
- "Problem Solving", "Git", "DevOps", "Full Stack", "Mobile Development", "Android", "iOS"
  ]

- clean_job_text = job_description.lower()

  # Extract job title
- title_patterns = [
- r'^([^:.\n]+?)(position|role|job|opening|vacancy)',
- r'^([^:.\n]+?)\n',
- r'(hiring|looking for(?: a| an)?|recruiting)(?: a| an)? ([^:.\n]+?)(:-|[.:]|\n|$)'
- ]
-
  job_title = "Not specified"
- for pattern in title_patterns:
- title_match = re.search(pattern, clean_job_text, re.IGNORECASE)
- if title_match:
- potential_title = title_match.group(1).strip() if len(title_match.groups()) >= 1 else title_match.group(2).strip()
- if 3 <= len(potential_title) <= 50:
- job_title = potential_title.capitalize()
  break

- # Extract years of experience
- exp_patterns = [
- r'(\d+)(?:\+)?\s*(?:years|yrs)(?:\s*of)?\s*(?:experience|exp)',
- r'experience\s*(?:of)?\s*(\d+)(?:\+)?\s*(?:years|yrs)'
- ]
-
  years_required = 0
- for pattern in exp_patterns:
- exp_match = re.search(pattern, clean_job_text, re.IGNORECASE)
- if exp_match:
  try:
- years_required = int(exp_match.group(1))
  break
- except:
- pass

- # Extract required skills
- required_skills = [skill for skill in tech_skills if re.search(r'\b' + re.escape(skill.lower()) + r'\b', clean_job_text)]

  # Fallback if no skills found
  if not required_skills:
- words = re.findall(r'\b\w{4,}\b', clean_job_text)
  word_counts = {}
- for word in words:
- if word not in ["with", "that", "this", "have", "from", "they", "will", "what", "your", "their", "about"]:
- word_counts[word] = word_counts.get(word, 0) + 1
- sorted_words = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)
- required_skills = [word.capitalize() for word, _ in sorted_words[:5]]
-
- job_summary = summarize_text(job_description, models, max_length=100)

  return {
  "title": job_title,
  "years_experience": years_required,
  "required_skills": required_skills,
- "summary": job_summary
  }
  def evaluate_job_fit(resume_summary, job_requirements, models):
- """Evaluate how well a resume matches job requirements"""
- start_time = time.time()

- # Extract information
  required_skills = job_requirements["required_skills"]
  years_required = job_requirements["years_experience"]
  job_title = job_requirements["title"]
  skills_mentioned = extract_skills(resume_summary)

- # Calculate match percentages
  matching_skills = [skill for skill in required_skills if skill in skills_mentioned]
- skill_match_percentage = len(matching_skills) / len(required_skills) if required_skills else 0
-
- # Extract experience level from resume
- experience_pattern = r'(\d+)\+?\s*years?\s*(?:of)?\s*experience'
- years_experience = 0
- experience_match = re.search(experience_pattern, resume_summary, re.IGNORECASE)
- if experience_match:
- try:
- years_experience = int(experience_match.group(1))
- except:
- pass

- # Calculate match scores
- exp_match_ratio = min(1.0, years_experience / max(1, years_required)) if years_required > 0 else 0.5

- # Job title match score
- title_words = [word for word in job_title.lower().split() if len(word) > 3]
- title_matches = sum(1 for word in title_words if word in resume_summary.lower())
- title_match = title_matches / len(title_words) if title_words else 0

- # Calculate individual scores
- skill_score = min(2, skill_match_percentage * 3)
- exp_score = min(2, exp_match_ratio * 2)
- title_score = min(2, title_match * 2)

  # Extract candidate info
- name_match = re.search(r'Name:\s*(.*?)(?=\n|\Z)', resume_summary)
- name = name_match.group(1).strip() if name_match else "The candidate"

- industry_match = re.search(r'Expected Industry:\s*(.*?)(?=\n|\Z)', resume_summary)
- industry = industry_match.group(1).strip() if industry_match else "unspecified industry"

- # Calculate final weighted score
- weighted_score = (skill_score * 0.5) + (exp_score * 0.3) + (title_score * 0.2)

- # Determine fit score
- if weighted_score >= 1.5:
  fit_score = 2 # Good fit
- elif weighted_score >= 0.8:
- fit_score = 1 # Potential fit
  else:
  fit_score = 0 # Not a fit

- # Generate assessment text
- missing_skills = [skill for skill in required_skills if skill not in skills_mentioned]

  if fit_score == 2:
- fit_assessment = f"{fit_score}: GOOD FIT - {name} demonstrates strong alignment with the {job_title} position. Their background in {industry} and professional experience appear well-suited for this role's requirements. The technical expertise matches what the position demands."
  elif fit_score == 1:
- fit_assessment = f"{fit_score}: POTENTIAL FIT - {name} shows potential for the {job_title} role with some relevant experience, though there are gaps in certain technical areas. Their {industry} background provides partial alignment with the position requirements. Additional training might be needed in {', '.join(missing_skills[:2])} if pursuing this opportunity."
  else:
- fit_assessment = f"{fit_score}: NO FIT - {name}'s current background shows limited alignment with this {job_title} position. Their experience level and technical background differ significantly from the role requirements. A position better matching their {industry} expertise might be more suitable."

- return fit_assessment, fit_score, time.time() - start_time

  def analyze_job_fit(resume_summary, job_description, models):
- """End-to-end job fit analysis"""
- start_time = time.time()
  job_requirements = extract_job_requirements(job_description, models)
- assessment, fit_score, execution_time = evaluate_job_fit(resume_summary, job_requirements, models)
- return assessment, fit_score, time.time() - start_time
  #####################################
  # Main Function
  #####################################
  def main():
- """Main function for the Streamlit application"""
  st.title("Resume-Job Fit Analyzer")
- st.markdown("Upload your resume file in **.docx**, **.doc**, or **.txt** format and enter a job description to see how well you match with the job requirements.")

- # Load models
  models = load_models()
-
- # User inputs
- uploaded_file = st.file_uploader("Upload your resume (.docx, .doc, or .txt)", type=["docx", "doc", "txt"])
  job_description = st.text_area("Enter Job Description", height=200, placeholder="Paste the job description here...")

  # Process when button clicked
- if uploaded_file is not None and job_description and st.button("Analyze Job Fit"):
- progress_bar = st.progress(0)
- status_text = st.empty()

  # Step 1: Extract text
- status_text.text("Step 1/3: Extracting text from resume...")
  resume_text = extract_text_from_file(uploaded_file)
- progress_bar.progress(25)

  if resume_text.startswith("Error") or resume_text == "Unsupported file type. Please upload a .docx, .doc, or .txt file.":
  st.error(resume_text)
  else:
  # Step 2: Generate summary
- status_text.text("Step 2/3: Analyzing resume and generating summary...")
- summary, summarization_time = summarize_resume_text(resume_text, models)
- progress_bar.progress(50)
-
- # Display summary
  st.subheader("Your Resume Summary")
  st.markdown(summary)

- # Step 3: Generate job fit assessment
- status_text.text("Step 3/3: Evaluating job fit (this will take a moment)...")
- assessment, fit_score, assessment_time = analyze_job_fit(summary, job_description, models)
- progress_bar.progress(100)
- status_text.empty()

  # Display results
  st.subheader("Job Fit Assessment")
-
- # Display score with appropriate styling
  fit_labels = {0: "NOT FIT", 1: "POTENTIAL FIT", 2: "GOOD FIT"}
- score_colors = {0: "red", 1: "orange", 2: "green"}
- st.markdown(f"<h2 style='color: {score_colors[fit_score]};'>{fit_labels[fit_score]}</h2>", unsafe_allow_html=True)
  st.markdown(assessment)
- st.info(f"Analysis completed in {(summarization_time + assessment_time):.2f} seconds")

  # Recommendations
  st.subheader("Recommended Next Steps")
-
  if fit_score == 2:
  st.markdown("""
  - Apply for this position as you appear to be a good match
@@ -518,6 +487,10 @@ def main():
  - If interested in this field, focus on developing the required skills
  - Consider similar roles with fewer experience requirements
  """)

  if __name__ == "__main__":
  main()
 
@@ -1,11 +1,6 @@
+ import os, io, re, time, tempfile
  import streamlit as st
+ import docx, docx2txt
  import pandas as pd
  from functools import lru_cache

@@ -17,24 +12,21 @@ except ImportError:
  from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForSeq2SeqLM
  import torch
  has_pipeline = False

+ # Setup page
  st.set_page_config(page_title="Resume-Job Fit Analyzer", initial_sidebar_state="collapsed")
+ st.markdown("""<style>[data-testid="collapsedControl"],[data-testid="stSidebar"] {display: none;}</style>""", unsafe_allow_html=True)

  #####################################
+ # Model Loading & Text Processing
  #####################################
+ @st.cache_resource
  def load_models():
+ with st.spinner("Loading AI models..."):
  models = {}

  # Load summarization model
  if has_pipeline:
+ models['summarizer'] = pipeline("summarization", model="Falconsai/text_summarization", max_length=100)
  else:
  try:
  models['summarizer_model'] = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/text_summarization")
@@ -53,453 +45,430 @@ def load_models():
  except Exception as e:
  st.error(f"Error loading sentiment model: {e}")
  models['evaluator_model'] = models['evaluator_tokenizer'] = None

  return models

  def summarize_text(text, models, max_length=100):
+ """Summarize text with fallbacks"""
  input_text = text[:1024]

+ # Try pipeline
  if has_pipeline and 'summarizer' in models:
  try:
  return models['summarizer'](input_text)[0]['summary_text']
+ except: pass

  # Try manual model
+ if 'summarizer_model' in models and models['summarizer_model']:
  try:
  tokenizer = models['summarizer_tokenizer']
  model = models['summarizer_model']
  inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=1024)
+ summary_ids = model.generate(inputs.input_ids, max_length=max_length, min_length=30, num_beams=4)
  return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+ except: pass

+ # Fallback - extract sentences
  sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
+ scored = [(1.0/(i+1), s) for i, s in enumerate(sentences) if len(s.split()) >= 4]
+ scored.sort(reverse=True)
+
+ result, length = [], 0
+ for _, sentence in scored:
+ if length + len(sentence.split()) <= max_length:
+ result.append(sentence)
+ length += len(sentence.split())
+
+ if result:
+ ordered = sorted([(sentences.index(s), s) for s in result])
+ return " ".join(s for _, s in ordered)
+ return ""

  #####################################
+ # File Processing & Information Extraction
  #####################################
+ @st.cache_data
  def extract_text_from_file(file_obj):
+ ext = os.path.splitext(file_obj.name)[1].lower()

  if ext == ".docx":
  try:
  document = docx.Document(file_obj)
+ return "\n".join(para.text for para in document.paragraphs if para.text.strip())[:15000]
  except Exception as e:
  return f"Error processing DOCX file: {e}"
  elif ext == ".doc":
  try:
  with tempfile.NamedTemporaryFile(delete=False, suffix='.doc') as temp_file:
  temp_file.write(file_obj.getvalue())
+ text = docx2txt.process(temp_file.name)
+ os.unlink(temp_file.name)
+ return text[:15000]
  except Exception as e:
  return f"Error processing DOC file: {e}"
  elif ext == ".txt":
  try:
+ return file_obj.getvalue().decode("utf-8")[:15000]
  except Exception as e:
  return f"Error processing TXT file: {e}"
  else:
  return "Unsupported file type. Please upload a .docx, .doc, or .txt file."

+ # Information extraction functions
  def extract_skills(text):
+ """Extract skills from text - expanded for better matching"""
+ # Expanded skill keywords dictionary
  skill_keywords = {
+ "Programming": ["Python", "Java", "JavaScript", "HTML", "CSS", "SQL", "C++", "C#", "React", "Angular", "Vue",
+ "PHP", "Ruby", "Swift", "Kotlin", "Go", "TypeScript", "Node.js", "jQuery", "Bootstrap"],
+ "Data Science": ["Machine Learning", "Data Analysis", "Statistics", "TensorFlow", "PyTorch", "AI", "NLP",
+ "Data Mining", "Big Data", "Data Visualization", "Statistical Analysis", "R", "SPSS", "SAS",
+ "Regression", "Classification", "Clustering", "Neural Networks", "Deep Learning"],
+ "Database": ["SQL", "MySQL", "MongoDB", "PostgreSQL", "Oracle", "Redis", "DynamoDB", "SQLite", "NoSQL",
+ "Database Design", "SQL Server", "Database Administration", "ETL", "Data Warehousing"],
+ "Web Dev": ["React", "Angular", "Node.js", "Frontend", "Backend", "Full-Stack", "REST API", "GraphQL",
+ "Web Development", "WordPress", "Drupal", "CMS", "SEO", "UI/UX", "Responsive Design", "AJAX"],
+ "Software Dev": ["Agile", "Scrum", "Git", "DevOps", "Docker", "CI/CD", "Jenkins", "Software Development",
+ "Object-Oriented Programming", "Design Patterns", "Testing", "QA", "Software Architecture",
+ "Version Control", "JIRA", "Microservices", "Code Review", "Debugging"],
+ "Cloud": ["AWS", "Azure", "Google Cloud", "Lambda", "S3", "EC2", "Cloud Computing", "Serverless",
+ "Infrastructure as Code", "Cloud Architecture", "Cloud Security", "Kubernetes", "Load Balancing"],
+ "Business": ["Project Management", "Leadership", "Teamwork", "Agile", "Scrum", "Business Analysis",
+ "Requirements Gathering", "Client Relations", "Communication", "Presentation", "Meeting Facilitation",
+ "Strategic Planning", "Process Improvement", "Problem Solving", "Decision Making", "Stakeholder Management"]
  }

  text_lower = text.lower()
+
+ # Method 1: Look for exact matches
+ exact_skills = [skill for _, skills in skill_keywords.items() for skill in skills if skill.lower() in text_lower]
+
+ # Method 2: Use regex for more flexible matching (accounts for variations)
+ more_skills = []
+ for category, skills in skill_keywords.items():
+ for skill in skills:
+ # This handles cases like "Python developer" or "experienced in Python"
+ if re.search(r'\b' + re.escape(skill.lower()) + r'(?:\s|\b|ing|er|ed)', text_lower):
+ more_skills.append(skill)
+
+ # Combine both methods and remove duplicates
+ all_skills = list(set(exact_skills + more_skills))
+
+ # Add soft skill detection
+ soft_skills = ["Communication", "Teamwork", "Problem Solving", "Critical Thinking",
+ "Leadership", "Organization", "Time Management", "Flexibility", "Adaptability"]
+
+ for skill in soft_skills:
+ if skill.lower() in text_lower or re.search(r'\b' + re.escape(skill.lower()) + r'(?:\s|$)', text_lower):
+ all_skills.append(skill)
+
+ return all_skills
  @lru_cache(maxsize=32)
  def extract_name(text_start):
+ lines = [line.strip() for line in text_start.split('\n')[:5] if line.strip()]

+ if lines:
+ first_line = lines[0]
+ if 5 <= len(first_line) <= 40 and not any(x in first_line.lower() for x in ["resume", "cv", "curriculum", "vitae"]):
  return first_line

+ for line in lines[:3]:
  if len(line.split()) <= 4 and not any(x in line.lower() for x in ["address", "phone", "email", "resume", "cv"]):
  return line
+ return "Unknown"

  def extract_age(text):
+ for pattern in [r'age:?\s*(\d{1,2})', r'(\d{1,2})\s*years\s*old', r'dob:.*(\d{4})', r'date of birth:.*(\d{4})']:
+ match = re.search(pattern, text.lower())
+ if match:
+ if len(match.group(1)) == 4: # Birth year
+ try: return str(2025 - int(match.group(1)))
+ except: pass
+ return match.group(1)
  return "Not specified"

  def extract_industry(text):
+ industries = {
  "Technology": ["software", "programming", "developer", "IT", "tech", "computer", "digital"],
  "Finance": ["banking", "financial", "accounting", "finance", "analyst"],
+ "Healthcare": ["medical", "health", "hospital", "clinical", "nurse", "doctor"],
+ "Education": ["teaching", "teacher", "professor", "education", "university", "school"],
  "Marketing": ["marketing", "advertising", "digital marketing", "social media", "brand"],
  "Engineering": ["engineer", "engineering", "mechanical", "civil", "electrical"],
  "Data Science": ["data science", "machine learning", "AI", "analytics", "big data"],
+ "Management": ["manager", "management", "leadership", "executive", "director"]
  }

  text_lower = text.lower()
+ counts = {ind: sum(text_lower.count(kw) for kw in kws) for ind, kws in industries.items()}
+ return max(counts.items(), key=lambda x: x[1])[0] if any(counts.values()) else "Not specified"

  def extract_job_position(text):
  text_lower = text.lower()
+ for pattern in [r'objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)', r'career\s*objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
+ r'summary:?\s*(.*?)(?=\n\n|\n\w+:|\Z)', r'seeking.*position.*as\s*([^.]*)']:
  match = re.search(pattern, text_lower, re.IGNORECASE | re.DOTALL)
  if match:
+ text = match.group(1).strip()
+ for title in ["developer", "engineer", "analyst", "manager", "specialist", "designer"]:
+ if title in text:
+ return next((m.group(1).strip().title() for m in
+ [re.search(r'(\w+\s+' + title + r')', text)] if m), title.title())
+ return " ".join(text.split()[:10]).title() + "..." if len(text.split()) > 10 else text.title()
+
+ # Check for job title near experience
+ for pattern in [r'experience:.*?(\w+\s+\w+(?:\s+\w+)?)(?=\s*at|\s*\()', r'(\w+\s+\w+(?:\s+\w+)?)\s*\(\s*(?:current|present)']:
  match = re.search(pattern, text_lower, re.IGNORECASE)
+ if match: return match.group(1).strip().title()

+ return "Not specified"

  #####################################
  # Core Analysis Functions
  #####################################
  def summarize_resume_text(resume_text, models):
+ start = time.time()

+ # Basic info extraction
  name = extract_name(resume_text[:500])
  age = extract_age(resume_text)
  industry = extract_industry(resume_text)
  job_position = extract_job_position(resume_text)
  skills = extract_skills(resume_text)

+ # Generate summary
  try:
  if has_pipeline and 'summarizer' in models:
+ model_summary = models['summarizer'](resume_text[:2000], max_length=100, min_length=30)[0]['summary_text']
  else:
  model_summary = summarize_text(resume_text, models, max_length=100)
+ except:
+ model_summary = "Error generating summary."
+
+ # Format result
+ summary = f"Name: {name}\n\nAge: {age}\n\nExpected Industry: {industry}\n\n"
+ summary += f"Expected Job Position: {job_position}\n\nSkills: {', '.join(skills)}\n\nSummary: {model_summary}"
+
+ return summary, time.time() - start
  def extract_job_requirements(job_description, models):
+ # Expanded technical skills list for better matching
  tech_skills = [
+ "Python", "Java", "JavaScript", "SQL", "HTML", "CSS", "React", "Angular", "Vue", "Node.js",
+ "Machine Learning", "Data Science", "AI", "Deep Learning", "NLP", "Statistics", "TensorFlow",
+ "AWS", "Azure", "Google Cloud", "Docker", "Kubernetes", "CI/CD", "DevOps",
+ "MySQL", "MongoDB", "PostgreSQL", "Oracle", "NoSQL", "Database", "Data Analysis",
+ "Project Management", "Agile", "Scrum", "Leadership", "Communication", "Teamwork",
+ "Git", "Software Development", "Full Stack", "Frontend", "Backend", "RESTful API",
+ "Mobile Development", "Android", "iOS", "Swift", "Kotlin", "React Native", "Flutter",
+ "Business Analysis", "Requirements", "UX/UI", "Design", "Product Management",
+ "Testing", "QA", "Security", "Cloud Computing", "Networking", "System Administration",
+ "Linux", "Windows", "Excel", "PowerPoint", "Word", "Microsoft Office",
+ "Problem Solving", "Critical Thinking", "Analytical Skills"
  ]

+ clean_text = job_description.lower()

  # Extract job title
  job_title = "Not specified"
+ for pattern in [r'^([^:.\n]+?)(position|role|job)', r'^([^:.\n]+?)\n', r'hiring.*? ([^:.\n]+?)(:-|[.:]|\n|$)']:
+ match = re.search(pattern, clean_text, re.IGNORECASE)
+ if match:
+ title = match.group(1).strip() if len(match.groups()) >= 1 else match.group(2).strip()
+ if 3 <= len(title) <= 50:
+ job_title = title.capitalize()
  break

+ # Extract years required
  years_required = 0
+ for pattern in [r'(\d+)(?:\+)?\s*(?:years|yrs).*?experience', r'experience.*?(\d+)(?:\+)?\s*(?:years|yrs)']:
+ match = re.search(pattern, clean_text, re.IGNORECASE)
+ if match:
  try:
+ years_required = int(match.group(1))
  break
+ except: pass

+ # Extract skills
+ required_skills = [skill for skill in tech_skills if re.search(r'\b' + re.escape(skill.lower()) + r'\b', clean_text)]

  # Fallback if no skills found
  if not required_skills:
+ words = [w for w in re.findall(r'\b\w{4,}\b', clean_text)
+ if w not in ["with", "that", "this", "have", "from", "they", "will", "what", "your"]]
  word_counts = {}
+ for w in words: word_counts[w] = word_counts.get(w, 0) + 1
+ required_skills = [w.capitalize() for w, _ in sorted(word_counts.items(), key=lambda x: x[1], reverse=True)[:5]]

  return {
  "title": job_title,
  "years_experience": years_required,
  "required_skills": required_skills,
+ "summary": summarize_text(job_description, models, max_length=100)
  }
  def evaluate_job_fit(resume_summary, job_requirements, models):
+ start = time.time()

+ # Basic extraction
  required_skills = job_requirements["required_skills"]
  years_required = job_requirements["years_experience"]
  job_title = job_requirements["title"]
  skills_mentioned = extract_skills(resume_summary)

+ # Calculate matches - IMPROVED MATCHING ALGORITHM
  matching_skills = [skill for skill in required_skills if skill in skills_mentioned]

+ # More balanced skill match calculation:
+ # - If no required skills, default to 0.5 (neutral)
+ # - Otherwise calculate percentage but with diminishing returns
+ if not required_skills:
+ skill_match = 0.5
+ else:
+ raw_match = len(matching_skills) / len(required_skills)
+ # Apply a more gradual scaling to avoid big jumps
+ skill_match = raw_match ** 0.7 # Using power < 1 gives more weight to partial matches

+ # Extract experience
+ years_experience = 0
+ exp_match = re.search(r'(\d+)\+?\s*years?\s*(?:of)?\s*experience', resume_summary, re.IGNORECASE)
+ if exp_match:
+ try: years_experience = int(exp_match.group(1))
+ except: pass
+
+ # Calculate scores with smoother transitions
+ # Experience matching: more balanced, handles the case where job requires no experience
+ if years_required == 0:
+ # If no experience required, having 1+ years is good, 0 is neutral
+ exp_match_ratio = min(1.0, years_experience / 2 + 0.5)
+ else:
+ # For jobs requiring experience, use a more gradual scale
+ exp_match_ratio = min(1.0, (years_experience / max(1, years_required)) ** 0.8)

+ # Title matching - improved to find partial matches
+ title_words = [w for w in job_title.lower().split() if len(w) > 3]
+ if not title_words:
+ title_match = 0.5 # Neutral if no meaningful title words
+ else:
+ matches = 0
+ for word in title_words:
+ if word in resume_summary.lower():
+ matches += 1
+ # Look for similar words (prefixes) for partial matching
+ elif any(w.startswith(word[:4]) for w in resume_summary.lower().split() if len(w) > 3):
+ matches += 0.5
+ title_match = matches / len(title_words)
+
+ # Calculate final scores with more reasonable ranges
+ skill_score = skill_match * 2.0 # 0-2 scale
+ exp_score = exp_match_ratio * 2.0 # 0-2 scale
+ title_score = title_match * 2.0 # 0-2 scale

  # Extract candidate info
+ name = re.search(r'Name:\s*(.*?)(?=\n|\Z)', resume_summary)
+ name = name.group(1).strip() if name else "The candidate"

+ industry = re.search(r'Expected Industry:\s*(.*?)(?=\n|\Z)', resume_summary)
+ industry = industry.group(1).strip() if industry else "unspecified industry"

+ # Calculate weighted score - ADJUSTED WEIGHTS
+ weighted_score = (skill_score * 0.45) + (exp_score * 0.35) + (title_score * 0.20)

+ # IMPROVED THRESHOLDS to get more "Potential Fit" results
+ # Good Fit: 1.25+ (was 1.5)
+ # Potential Fit: 0.6-1.25 (was 0.8-1.5)
+ # No Fit: <0.6 (was <0.8)
+ if weighted_score >= 1.25:
  fit_score = 2 # Good fit
+ elif weighted_score >= 0.6:
+ fit_score = 1 # Potential fit - wider range
  else:
  fit_score = 0 # Not a fit

+ # Add logging to help debug the scoring
+ st.session_state['debug_scores'] = {
+ 'skill_match': skill_match,
+ 'skill_score': skill_score,
+ 'exp_match_ratio': exp_match_ratio,
+ 'exp_score': exp_score,
+ 'title_match': title_match,
+ 'title_score': title_score,
+ 'weighted_score': weighted_score,
+ 'fit_score': fit_score,
+ 'matching_skills': matching_skills,
+ 'required_skills': required_skills
+ }
+
+ # Generate assessment
+ missing = [skill for skill in required_skills if skill not in skills_mentioned]

  if fit_score == 2:
+ assessment = f"{fit_score}: GOOD FIT - {name} demonstrates strong alignment with the {job_title} position. Their background in {industry} appears well-suited for this role's requirements."
  elif fit_score == 1:
+ assessment = f"{fit_score}: POTENTIAL FIT - {name} shows potential for the {job_title} role but has gaps in certain areas. Additional training might be needed in {', '.join(missing[:2])}."
  else:
+ assessment = f"{fit_score}: NO FIT - {name}'s background shows limited alignment with this {job_title} position. Their experience and skills differ significantly from the requirements."

+ return assessment, fit_score, time.time() - start

  def analyze_job_fit(resume_summary, job_description, models):
+ start = time.time()
  job_requirements = extract_job_requirements(job_description, models)
+ assessment, fit_score, _ = evaluate_job_fit(resume_summary, job_requirements, models)
+ return assessment, fit_score, time.time() - start
  #####################################
  # Main Function
  #####################################
  def main():
+ # Initialize session state for debug info
+ if 'debug_scores' not in st.session_state:
+ st.session_state['debug_scores'] = {}
+
  st.title("Resume-Job Fit Analyzer")
+ st.markdown("Upload your resume file in **.docx**, **.doc**, or **.txt** format and enter a job description to see how well you match.")

+ # Load models and get inputs
  models = load_models()
+ uploaded_file = st.file_uploader("Upload your resume", type=["docx", "doc", "txt"])
  job_description = st.text_area("Enter Job Description", height=200, placeholder="Paste the job description here...")

  # Process when button clicked
+ if uploaded_file and job_description and st.button("Analyze Job Fit"):
+ progress = st.progress(0)
+ status = st.empty()

  # Step 1: Extract text
+ status.text("Step 1/3: Extracting text from resume...")
  resume_text = extract_text_from_file(uploaded_file)
+ progress.progress(25)

  if resume_text.startswith("Error") or resume_text == "Unsupported file type. Please upload a .docx, .doc, or .txt file.":
  st.error(resume_text)
  else:
  # Step 2: Generate summary
+ status.text("Step 2/3: Analyzing resume...")
+ summary, summary_time = summarize_resume_text(resume_text, models)
+ progress.progress(50)

  st.subheader("Your Resume Summary")
  st.markdown(summary)

+ # Step 3: Evaluate fit
+ status.text("Step 3/3: Evaluating job fit...")
+ assessment, fit_score, eval_time = analyze_job_fit(summary, job_description, models)
+ progress.progress(100)
+ status.empty()

  # Display results
  st.subheader("Job Fit Assessment")
  fit_labels = {0: "NOT FIT", 1: "POTENTIAL FIT", 2: "GOOD FIT"}
+ colors = {0: "red", 1: "orange", 2: "green"}
+ st.markdown(f"<h2 style='color: {colors[fit_score]};'>{fit_labels[fit_score]}</h2>", unsafe_allow_html=True)
  st.markdown(assessment)
+ st.info(f"Analysis completed in {(summary_time + eval_time):.2f} seconds")

  # Recommendations
  st.subheader("Recommended Next Steps")
  if fit_score == 2:
  st.markdown("""
  - Apply for this position as you appear to be a good match

@@ -518,6 +487,10 @@ def main():
  - If interested in this field, focus on developing the required skills
  - Consider similar roles with fewer experience requirements
  """)
+
+ # Show debug scores if needed (uncomment this to debug scoring)
+ # st.subheader("Debug Information")
+ # st.json(st.session_state['debug_scores'])

  if __name__ == "__main__":
  main()