CR7CAD committed
Commit c2290eb · verified · 1 Parent(s): 8d475a5

Update app.py

Files changed (1)
  1. app.py +290 -160
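
The main structural change in this update is that the inline extractive fallback inside summarize_text is pulled out into a dedicated basic_summarize helper: sentences are scored by position (earlier is better) with a small penalty for very long sentences, greedily selected up to a word budget, then re-emitted in original order. A minimal standalone sketch of that idea, simplified from the diff below (plain Python, a simpler sentence splitter, and indices carried alongside the scores instead of re-looked up; not the exact code from this commit):

import re

def basic_summarize(text, max_length=100):
    """Extractive fallback: favour early sentences, lightly penalise very long ones."""
    # Simplified splitter; the app itself uses a more elaborate regex with lookbehinds.
    sentences = re.split(r'(?<=[.?!])\s+', text)

    # Score each sentence: 1/(position+1), minus 0.01 per word beyond 20.
    scored = []
    for i, sentence in enumerate(sentences):
        words = sentence.split()
        if len(words) >= 4:
            score = 1.0 / (i + 1) - 0.01 * max(0, len(words) - 20)
            scored.append((score, i, sentence))
    scored.sort(reverse=True)

    # Greedily take the top-scoring sentences within the word budget,
    # then restore original document order before joining.
    picked, used = [], 0
    for _, i, sentence in scored:
        if used + len(sentence.split()) <= max_length:
            picked.append((i, sentence))
            used += len(sentence.split())
    picked.sort()
    return " ".join(s for _, s in picked)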
app.py CHANGED
@@ -1,6 +1,11 @@
- import os, io, re, time, tempfile
  import streamlit as st
- import docx, docx2txt
  import pandas as pd
  from functools import lru_cache

@@ -12,21 +17,24 @@ except ImportError:
      from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForSeq2SeqLM
      import torch
      has_pipeline = False

- # Setup page
  st.set_page_config(page_title="Resume-Job Fit Analyzer", initial_sidebar_state="collapsed")
- st.markdown("""<style>[data-testid="collapsedControl"],[data-testid="stSidebar"] {display: none;}</style>""", unsafe_allow_html=True)

  #####################################
- # Model Loading & Text Processing
  #####################################
- @st.cache_resource
  def load_models():
-     with st.spinner("Loading AI models..."):
          models = {}

          # Load summarization model
          if has_pipeline:
-             models['summarizer'] = pipeline("summarization", model="Falconsai/text_summarization", max_length=100)
          else:
              try:
                  models['summarizer_model'] = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/text_summarization")
@@ -45,331 +53,453 @@ def load_models():
              except Exception as e:
                  st.error(f"Error loading sentiment model: {e}")
                  models['evaluator_model'] = models['evaluator_tokenizer'] = None
      return models

  def summarize_text(text, models, max_length=100):
-     """Summarize text with fallbacks"""
      input_text = text[:1024]

-     # Try pipeline
      if has_pipeline and 'summarizer' in models:
          try:
              return models['summarizer'](input_text)[0]['summary_text']
-         except: pass

      # Try manual model
-     if 'summarizer_model' in models and models['summarizer_model']:
          try:
              tokenizer = models['summarizer_tokenizer']
              model = models['summarizer_model']
              inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=1024)
-             summary_ids = model.generate(inputs.input_ids, max_length=max_length, min_length=30, num_beams=4)
              return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
-         except: pass

-     # Fallback - extract sentences
      sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
-     scored = [(1.0/(i+1), s) for i, s in enumerate(sentences) if len(s.split()) >= 4]
-     scored.sort(reverse=True)
-
-     result, length = [], 0
-     for _, sentence in scored:
-         if length + len(sentence.split()) <= max_length:
-             result.append(sentence)
-             length += len(sentence.split())
-
-     if result:
-         ordered = sorted([(sentences.index(s), s) for s in result])
-         return " ".join(s for _, s in ordered)
-     return ""

  #####################################
- # File Processing & Information Extraction
  #####################################
- @st.cache_data
  def extract_text_from_file(file_obj):
-     ext = os.path.splitext(file_obj.name)[1].lower()

      if ext == ".docx":
          try:
              document = docx.Document(file_obj)
-             return "\n".join(para.text for para in document.paragraphs if para.text.strip())[:15000]
          except Exception as e:
              return f"Error processing DOCX file: {e}"
      elif ext == ".doc":
          try:
              with tempfile.NamedTemporaryFile(delete=False, suffix='.doc') as temp_file:
                  temp_file.write(file_obj.getvalue())
-                 text = docx2txt.process(temp_file.name)
-                 os.unlink(temp_file.name)
-                 return text[:15000]
          except Exception as e:
              return f"Error processing DOC file: {e}"
      elif ext == ".txt":
          try:
-             return file_obj.getvalue().decode("utf-8")[:15000]
          except Exception as e:
              return f"Error processing TXT file: {e}"
      else:
          return "Unsupported file type. Please upload a .docx, .doc, or .txt file."

- # Information extraction functions
  def extract_skills(text):
-     """Extract skills from text"""
      skill_keywords = {
-         "Programming": ["Python", "Java", "JavaScript", "HTML", "CSS", "SQL", "C++", "C#", "React", "Angular"],
-         "Data Science": ["Machine Learning", "Data Analysis", "Statistics", "TensorFlow", "PyTorch", "AI", "NLP"],
-         "Database": ["SQL", "MySQL", "MongoDB", "PostgreSQL", "Oracle", "Redis"],
-         "Web Dev": ["React", "Angular", "Node.js", "Frontend", "Backend", "Full-Stack", "REST API"],
-         "Software Dev": ["Agile", "Scrum", "Git", "DevOps", "Docker", "CI/CD", "Jenkins"],
-         "Cloud": ["AWS", "Azure", "Google Cloud", "Lambda", "S3", "EC2"],
-         "Business": ["Project Management", "Leadership", "Teamwork", "Agile", "Scrum"]
      }

      text_lower = text.lower()
-     return [skill for _, skills in skill_keywords.items() for skill in skills if skill.lower() in text_lower]

  @lru_cache(maxsize=32)
  def extract_name(text_start):
-     lines = [line.strip() for line in text_start.split('\n')[:5] if line.strip()]

-     if lines:
-         first_line = lines[0]
-         if 5 <= len(first_line) <= 40 and not any(x in first_line.lower() for x in ["resume", "cv", "curriculum", "vitae"]):
              return first_line

-     for line in lines[:3]:
          if len(line.split()) <= 4 and not any(x in line.lower() for x in ["address", "phone", "email", "resume", "cv"]):
              return line
-     return "Unknown"

  def extract_age(text):
-     for pattern in [r'age:?\s*(\d{1,2})', r'(\d{1,2})\s*years\s*old', r'dob:.*(\d{4})', r'date of birth:.*(\d{4})']:
-         match = re.search(pattern, text.lower())
-         if match:
-             if len(match.group(1)) == 4:  # Birth year
-                 try: return str(2025 - int(match.group(1)))
-                 except: pass
-             return match.group(1)
      return "Not specified"

  def extract_industry(text):
-     industries = {
          "Technology": ["software", "programming", "developer", "IT", "tech", "computer", "digital"],
          "Finance": ["banking", "financial", "accounting", "finance", "analyst"],
-         "Healthcare": ["medical", "health", "hospital", "clinical", "nurse", "doctor"],
-         "Education": ["teaching", "teacher", "professor", "education", "university", "school"],
          "Marketing": ["marketing", "advertising", "digital marketing", "social media", "brand"],
          "Engineering": ["engineer", "engineering", "mechanical", "civil", "electrical"],
          "Data Science": ["data science", "machine learning", "AI", "analytics", "big data"],
-         "Management": ["manager", "management", "leadership", "executive", "director"]
      }

      text_lower = text.lower()
-     counts = {ind: sum(text_lower.count(kw) for kw in kws) for ind, kws in industries.items()}
-     return max(counts.items(), key=lambda x: x[1])[0] if any(counts.values()) else "Not specified"

  def extract_job_position(text):
      text_lower = text.lower()
-     for pattern in [r'objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)', r'career\s*objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
-                     r'summary:?\s*(.*?)(?=\n\n|\n\w+:|\Z)', r'seeking.*position.*as\s*([^.]*)']:
          match = re.search(pattern, text_lower, re.IGNORECASE | re.DOTALL)
          if match:
-             text = match.group(1).strip()
-             for title in ["developer", "engineer", "analyst", "manager", "specialist", "designer"]:
-                 if title in text:
-                     return next((m.group(1).strip().title() for m in
-                                  [re.search(r'(\w+\s+' + title + r')', text)] if m), title.title())
-             return " ".join(text.split()[:10]).title() + "..." if len(text.split()) > 10 else text.title()
-
-     # Check for job title near experience
-     for pattern in [r'experience:.*?(\w+\s+\w+(?:\s+\w+)?)(?=\s*at|\s*\()', r'(\w+\s+\w+(?:\s+\w+)?)\s*\(\s*(?:current|present)']:
          match = re.search(pattern, text_lower, re.IGNORECASE)
-         if match: return match.group(1).strip().title()

-     return "Not specified"

  #####################################
  # Core Analysis Functions
  #####################################
  def summarize_resume_text(resume_text, models):
-     start = time.time()

-     # Basic info extraction
      name = extract_name(resume_text[:500])
      age = extract_age(resume_text)
      industry = extract_industry(resume_text)
      job_position = extract_job_position(resume_text)
      skills = extract_skills(resume_text)

-     # Generate summary
      try:
          if has_pipeline and 'summarizer' in models:
-             model_summary = models['summarizer'](resume_text[:2000], max_length=100, min_length=30)[0]['summary_text']
          else:
              model_summary = summarize_text(resume_text, models, max_length=100)
-     except:
-         model_summary = "Error generating summary."
-
-     # Format result
-     summary = f"Name: {name}\n\nAge: {age}\n\nExpected Industry: {industry}\n\n"
-     summary += f"Expected Job Position: {job_position}\n\nSkills: {', '.join(skills)}\n\nSummary: {model_summary}"
-
-     return summary, time.time() - start

  def extract_job_requirements(job_description, models):
      tech_skills = [
-         "Python", "Java", "JavaScript", "SQL", "HTML", "CSS", "React", "Angular", "Machine Learning", "AWS",
-         "Azure", "Docker", "MySQL", "MongoDB", "Project Management", "Agile", "Leadership", "Git", "DevOps"
      ]

-     clean_text = job_description.lower()

      # Extract job title
      job_title = "Not specified"
-     for pattern in [r'^([^:.\n]+?)(position|role|job)', r'^([^:.\n]+?)\n', r'hiring.*? ([^:.\n]+?)(:-|[.:]|\n|$)']:
-         match = re.search(pattern, clean_text, re.IGNORECASE)
-         if match:
-             title = match.group(1).strip() if len(match.groups()) >= 1 else match.group(2).strip()
-             if 3 <= len(title) <= 50:
-                 job_title = title.capitalize()
                  break

-     # Extract years required
      years_required = 0
-     for pattern in [r'(\d+)(?:\+)?\s*(?:years|yrs).*?experience', r'experience.*?(\d+)(?:\+)?\s*(?:years|yrs)']:
-         match = re.search(pattern, clean_text, re.IGNORECASE)
-         if match:
              try:
-                 years_required = int(match.group(1))
                  break
-             except: pass

-     # Extract skills
-     required_skills = [skill for skill in tech_skills if re.search(r'\b' + re.escape(skill.lower()) + r'\b', clean_text)]

      # Fallback if no skills found
      if not required_skills:
-         words = [w for w in re.findall(r'\b\w{4,}\b', clean_text)
-                  if w not in ["with", "that", "this", "have", "from", "they", "will", "what", "your"]]
          word_counts = {}
-         for w in words: word_counts[w] = word_counts.get(w, 0) + 1
-         required_skills = [w.capitalize() for w, _ in sorted(word_counts.items(), key=lambda x: x[1], reverse=True)[:5]]

      return {
          "title": job_title,
          "years_experience": years_required,
          "required_skills": required_skills,
-         "summary": summarize_text(job_description, models, max_length=100)
      }

  def evaluate_job_fit(resume_summary, job_requirements, models):
-     start = time.time()

-     # Basic extraction
      required_skills = job_requirements["required_skills"]
      years_required = job_requirements["years_experience"]
      job_title = job_requirements["title"]
      skills_mentioned = extract_skills(resume_summary)

-     # Calculate matches
      matching_skills = [skill for skill in required_skills if skill in skills_mentioned]
-     skill_match = len(matching_skills) / len(required_skills) if required_skills else 0

-     # Extract experience
      years_experience = 0
-     exp_match = re.search(r'(\d+)\+?\s*years?\s*(?:of)?\s*experience', resume_summary, re.IGNORECASE)
-     if exp_match:
-         try: years_experience = int(exp_match.group(1))
-         except: pass

-     # Calculate scores
      exp_match_ratio = min(1.0, years_experience / max(1, years_required)) if years_required > 0 else 0.5
-     title_words = [w for w in job_title.lower().split() if len(w) > 3]
-     title_match = sum(1 for w in title_words if w in resume_summary.lower()) / len(title_words) if title_words else 0

-     # Final scores
-     skill_score = min(2, skill_match * 3)
      exp_score = min(2, exp_match_ratio * 2)
      title_score = min(2, title_match * 2)

      # Extract candidate info
-     name = re.search(r'Name:\s*(.*?)(?=\n|\Z)', resume_summary)
-     name = name.group(1).strip() if name else "The candidate"

-     industry = re.search(r'Expected Industry:\s*(.*?)(?=\n|\Z)', resume_summary)
-     industry = industry.group(1).strip() if industry else "unspecified industry"

-     # Calculate weighted score
      weighted_score = (skill_score * 0.5) + (exp_score * 0.3) + (title_score * 0.2)
-     fit_score = 2 if weighted_score >= 1.5 else (1 if weighted_score >= 0.8 else 0)

-     # Generate assessment
-     missing = [skill for skill in required_skills if skill not in skills_mentioned]

      if fit_score == 2:
-         assessment = f"{fit_score}: GOOD FIT - {name} demonstrates strong alignment with the {job_title} position. Their background in {industry} appears well-suited for this role's requirements."
      elif fit_score == 1:
-         assessment = f"{fit_score}: POTENTIAL FIT - {name} shows potential for the {job_title} role but has gaps in certain areas. Additional training might be needed in {', '.join(missing[:2])}."
      else:
-         assessment = f"{fit_score}: NO FIT - {name}'s background shows limited alignment with this {job_title} position. Their experience and skills differ significantly from the requirements."

-     return assessment, fit_score, time.time() - start

  def analyze_job_fit(resume_summary, job_description, models):
-     start = time.time()
      job_requirements = extract_job_requirements(job_description, models)
-     assessment, fit_score, _ = evaluate_job_fit(resume_summary, job_requirements, models)
-     return assessment, fit_score, time.time() - start

  #####################################
  # Main Function
  #####################################
  def main():
      st.title("Resume-Job Fit Analyzer")
-     st.markdown("Upload your resume file in **.docx**, **.doc**, or **.txt** format and enter a job description to see how well you match.")

-     # Load models and get inputs
      models = load_models()
-     uploaded_file = st.file_uploader("Upload your resume", type=["docx", "doc", "txt"])
      job_description = st.text_area("Enter Job Description", height=200, placeholder="Paste the job description here...")

      # Process when button clicked
-     if uploaded_file and job_description and st.button("Analyze Job Fit"):
-         progress = st.progress(0)
-         status = st.empty()

          # Step 1: Extract text
-         status.text("Step 1/3: Extracting text from resume...")
          resume_text = extract_text_from_file(uploaded_file)
-         progress.progress(25)

          if resume_text.startswith("Error") or resume_text == "Unsupported file type. Please upload a .docx, .doc, or .txt file.":
              st.error(resume_text)
          else:
              # Step 2: Generate summary
-             status.text("Step 2/3: Analyzing resume...")
-             summary, summary_time = summarize_resume_text(resume_text, models)
-             progress.progress(50)
              st.subheader("Your Resume Summary")
              st.markdown(summary)

-             # Step 3: Evaluate fit
-             status.text("Step 3/3: Evaluating job fit...")
-             assessment, fit_score, eval_time = analyze_job_fit(summary, job_description, models)
-             progress.progress(100)
-             status.empty()

              # Display results
              st.subheader("Job Fit Assessment")
              fit_labels = {0: "NOT FIT", 1: "POTENTIAL FIT", 2: "GOOD FIT"}
-             colors = {0: "red", 1: "orange", 2: "green"}
-             st.markdown(f"<h2 style='color: {colors[fit_score]};'>{fit_labels[fit_score]}</h2>", unsafe_allow_html=True)
              st.markdown(assessment)
-             st.info(f"Analysis completed in {(summary_time + eval_time):.2f} seconds")

              # Recommendations
              st.subheader("Recommended Next Steps")
              if fit_score == 2:
                  st.markdown("""
                  - Apply for this position as you appear to be a good match
 
+ import os
+ import io
  import streamlit as st
+ import docx
+ import docx2txt
+ import tempfile
+ import time
+ import re
  import pandas as pd
  from functools import lru_cache

      from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForSeq2SeqLM
      import torch
      has_pipeline = False
+     st.warning("Using basic transformers functionality instead of pipeline API")

+ # Set page title and hide sidebar
  st.set_page_config(page_title="Resume-Job Fit Analyzer", initial_sidebar_state="collapsed")
+ st.markdown("""<style>[data-testid="collapsedControl"] {display: none;}section[data-testid="stSidebar"] {display: none;}</style>""", unsafe_allow_html=True)

  #####################################
+ # Preload Models & Helper Functions
  #####################################
+ @st.cache_resource(show_spinner=True)
  def load_models():
+     """Load models at startup"""
+     with st.spinner("Loading AI models... This may take a minute on first run."):
          models = {}
+
          # Load summarization model
          if has_pipeline:
+             models['summarizer'] = pipeline("summarization", model="Falconsai/text_summarization", max_length=100, truncation=True)
          else:
              try:
                  models['summarizer_model'] = AutoModelForSeq2SeqLM.from_pretrained("Falconsai/text_summarization")

              except Exception as e:
                  st.error(f"Error loading sentiment model: {e}")
                  models['evaluator_model'] = models['evaluator_tokenizer'] = None
+
      return models

  def summarize_text(text, models, max_length=100):
+     """Summarize text using available models with fallbacks"""
+     # Truncate input to prevent issues with long texts
      input_text = text[:1024]

+     # Try pipeline first
      if has_pipeline and 'summarizer' in models:
          try:
              return models['summarizer'](input_text)[0]['summary_text']
+         except Exception as e:
+             st.warning(f"Error in pipeline summarization: {e}")

      # Try manual model
+     if 'summarizer_model' in models and 'summarizer_tokenizer' in models and models['summarizer_model']:
          try:
              tokenizer = models['summarizer_tokenizer']
              model = models['summarizer_model']
              inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=1024)
+             summary_ids = model.generate(inputs.input_ids, max_length=max_length, min_length=30, num_beams=4, early_stopping=True)
              return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+         except Exception as e:
+             st.warning(f"Error in manual summarization: {e}")

+     # Fallback to basic summarization
+     return basic_summarize(text, max_length)
+
+ def basic_summarize(text, max_length=100):
+     """Basic extractive text summarization"""
      sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
+
+     # Score and filter sentences
+     scored_sentences = []
+     for i, sentence in enumerate(sentences):
+         if len(sentence.split()) >= 4:
+             score = 1.0 / (i + 1) - (0.01 * max(0, len(sentence.split()) - 20))
+             scored_sentences.append((score, sentence))
+
+     # Get top sentences
+     scored_sentences.sort(reverse=True)
+     summary_sentences = []
+     current_length = 0
+
+     for _, sentence in scored_sentences:
+         if current_length + len(sentence.split()) <= max_length:
+             summary_sentences.append(sentence)
+             current_length += len(sentence.split())
+         else:
+             break
+
+     # Restore original sentence order
+     if summary_sentences:
+         original_order = [(sentences.index(s), s) for s in summary_sentences]
+         original_order.sort()
+         summary_sentences = [s for _, s in original_order]
+
+     return " ".join(summary_sentences)

  #####################################
+ # Information Extraction Functions
  #####################################
+ @st.cache_data(show_spinner=False)
  def extract_text_from_file(file_obj):
+     """Extract text from uploaded document file"""
+     filename = file_obj.name
+     ext = os.path.splitext(filename)[1].lower()

      if ext == ".docx":
          try:
              document = docx.Document(file_obj)
+             text = "\n".join(para.text for para in document.paragraphs if para.text.strip())
          except Exception as e:
              return f"Error processing DOCX file: {e}"
      elif ext == ".doc":
          try:
              with tempfile.NamedTemporaryFile(delete=False, suffix='.doc') as temp_file:
                  temp_file.write(file_obj.getvalue())
+                 temp_path = temp_file.name
+
+             text = docx2txt.process(temp_path)
+             os.unlink(temp_path)
          except Exception as e:
              return f"Error processing DOC file: {e}"
      elif ext == ".txt":
          try:
+             text = file_obj.getvalue().decode("utf-8")
          except Exception as e:
              return f"Error processing TXT file: {e}"
      else:
          return "Unsupported file type. Please upload a .docx, .doc, or .txt file."
+
+     return text[:15000] if text else text

  def extract_skills(text):
+     """Extract key skills from the resume"""
      skill_keywords = {
+         "Programming": ["Python", "Java", "JavaScript", "HTML", "CSS", "SQL", "C++", "C#", "Go", "React", "Angular", "Vue", "Node.js"],
+         "Data Science": ["Machine Learning", "Data Analysis", "Statistics", "TensorFlow", "PyTorch", "AI", "Algorithms", "NLP", "Deep Learning"],
+         "Database": ["SQL", "MySQL", "MongoDB", "Database", "NoSQL", "PostgreSQL", "Oracle", "Redis"],
+         "Web Development": ["React", "Angular", "Node.js", "Frontend", "Backend", "Full-Stack", "REST API", "GraphQL"],
+         "Software Development": ["Agile", "Scrum", "Git", "DevOps", "Docker", "System Design", "CI/CD", "Jenkins"],
+         "Cloud": ["AWS", "Azure", "Google Cloud", "Cloud Computing", "Lambda", "S3", "EC2"],
+         "Security": ["Cybersecurity", "Network Security", "Encryption", "Security"],
+         "Business": ["Project Management", "Business Analysis", "Leadership", "Teamwork", "Agile", "Scrum"],
+         "Design": ["UX/UI", "User Experience", "Design Thinking", "Adobe", "Figma"]
      }

      text_lower = text.lower()
+     return [skill for category, skills in skill_keywords.items()
+             for skill in skills if skill.lower() in text_lower]

  @lru_cache(maxsize=32)
  def extract_name(text_start):
+     """Extract candidate name from the beginning of resume text"""
+     lines = text_start.split('\n')
+     potential_name_lines = [line.strip() for line in lines[:5] if line.strip()]

+     if potential_name_lines:
+         first_line = potential_name_lines[0]
+         if 5 <= len(first_line) <= 40 and not any(x in first_line.lower() for x in ["resume", "cv", "curriculum", "vitae", "profile"]):
              return first_line

+     for line in potential_name_lines[:3]:
          if len(line.split()) <= 4 and not any(x in line.lower() for x in ["address", "phone", "email", "resume", "cv"]):
              return line
+
+     return "Unknown (please extract from resume)"

  def extract_age(text):
+     """Extract candidate age from resume text"""
+     age_patterns = [
+         r'age:?\s*(\d{1,2})',
+         r'(\d{1,2})\s*years\s*old',
+         r'dob:.*(\d{4})',
+         r'date of birth:.*(\d{4})'
+     ]
+
+     text_lower = text.lower()
+     for pattern in age_patterns:
+         matches = re.search(pattern, text_lower)
+         if matches:
+             # Convert birth year to age if needed
+             if len(matches.group(1)) == 4:
+                 try:
+                     return str(2025 - int(matches.group(1)))
+                 except:
+                     pass
+             return matches.group(1)
+
      return "Not specified"

  def extract_industry(text):
+     """Extract expected job industry from resume"""
+     industry_keywords = {
          "Technology": ["software", "programming", "developer", "IT", "tech", "computer", "digital"],
          "Finance": ["banking", "financial", "accounting", "finance", "analyst"],
+         "Healthcare": ["medical", "health", "hospital", "clinical", "nurse", "doctor", "patient"],
+         "Education": ["teaching", "teacher", "professor", "education", "university", "school", "academic"],
          "Marketing": ["marketing", "advertising", "digital marketing", "social media", "brand"],
          "Engineering": ["engineer", "engineering", "mechanical", "civil", "electrical"],
          "Data Science": ["data science", "machine learning", "AI", "analytics", "big data"],
+         "Management": ["manager", "management", "leadership", "executive", "director"],
+         "Consulting": ["consultant", "consulting", "advisor"],
+         "Sales": ["sales", "business development", "account manager", "client relations"]
      }

      text_lower = text.lower()
+     industry_counts = {industry: sum(text_lower.count(keyword.lower()) for keyword in keywords)
+                        for industry, keywords in industry_keywords.items()}
+
+     return max(industry_counts.items(), key=lambda x: x[1])[0] if any(industry_counts.values()) else "Not clearly specified"

  def extract_job_position(text):
+     """Extract expected job position from resume"""
+     objective_patterns = [
+         r'objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
+         r'career\s*objective:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
+         r'professional\s*summary:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
+         r'summary:?\s*(.*?)(?=\n\n|\n\w+:|\Z)',
+         r'seeking\s*(?:a|an)?\s*(?:position|role|opportunity)\s*(?:as|in)?\s*(?:a|an)?\s*([^.]*)'
+     ]
+
      text_lower = text.lower()
+     for pattern in objective_patterns:
          match = re.search(pattern, text_lower, re.IGNORECASE | re.DOTALL)
          if match:
+             objective_text = match.group(1).strip()
+             job_titles = ["developer", "engineer", "analyst", "manager", "director", "specialist",
+                           "coordinator", "consultant", "designer", "architect", "administrator"]
+
+             for title in job_titles:
+                 if title in objective_text:
+                     title_pattern = r'(?:a|an)?\s*(\w+\s+' + title + r'|\w+\s+\w+\s+' + title + r')'
+                     title_match = re.search(title_pattern, objective_text)
+                     if title_match:
+                         return title_match.group(1).strip().title()
+                     return title.title()
+
+             if len(objective_text) > 10:
+                 words = objective_text.split()
+                 return " ".join(words[:10]).title() + "..." if len(words) > 10 else objective_text.title()
+
+     job_patterns = [
+         r'experience:.*?(\w+\s+\w+(?:\s+\w+)?)(?=\s*at|\s*\(|\s*-|\s*,|\s*\d{4}|\n)',
+         r'(\w+\s+\w+(?:\s+\w+)?)\s*\(\s*current\s*\)',
+         r'(\w+\s+\w+(?:\s+\w+)?)\s*\(\s*present\s*\)'
+     ]
+
+     for pattern in job_patterns:
          match = re.search(pattern, text_lower, re.IGNORECASE)
+         if match:
+             return match.group(1).strip().title()

+     return "Not explicitly stated"

  #####################################
  # Core Analysis Functions
  #####################################
  def summarize_resume_text(resume_text, models):
+     """Generate a structured summary of resume text"""
+     start_time = time.time()

+     # Extract critical information
      name = extract_name(resume_text[:500])
      age = extract_age(resume_text)
      industry = extract_industry(resume_text)
      job_position = extract_job_position(resume_text)
      skills = extract_skills(resume_text)

+     # Generate overall summary
      try:
          if has_pipeline and 'summarizer' in models:
+             model_summary = models['summarizer'](resume_text[:2000], max_length=100, min_length=30, do_sample=False)[0]['summary_text']
          else:
              model_summary = summarize_text(resume_text, models, max_length=100)
+     except Exception as e:
+         st.warning(f"Error in resume summarization: {e}")
+         model_summary = "Error generating summary. Please check the original resume."
+
+     # Format the structured summary
+     formatted_summary = f"Name: {name}\n\n"
+     formatted_summary += f"Age: {age}\n\n"
+     formatted_summary += f"Expected Industry: {industry}\n\n"
+     formatted_summary += f"Expected Job Position: {job_position}\n\n"
+     formatted_summary += f"Skills: {', '.join(skills)}\n\n"
+     formatted_summary += f"Summary: {model_summary}"
+
+     return formatted_summary, time.time() - start_time

  def extract_job_requirements(job_description, models):
+     """Extract key requirements from a job description"""
+     # Combined skill list (abridged for brevity)
      tech_skills = [
+         "Python", "Java", "C++", "JavaScript", "TypeScript", "SQL", "HTML", "CSS", "React", "Angular",
+         "Machine Learning", "Data Science", "AI", "AWS", "Azure", "Docker", "Kubernetes", "MySQL",
+         "MongoDB", "PostgreSQL", "Project Management", "Agile", "Scrum", "Leadership", "Communication",
+         "Problem Solving", "Git", "DevOps", "Full Stack", "Mobile Development", "Android", "iOS"
      ]

+     clean_job_text = job_description.lower()

      # Extract job title
+     title_patterns = [
+         r'^([^:.\n]+?)(position|role|job|opening|vacancy)',
+         r'^([^:.\n]+?)\n',
+         r'(hiring|looking for(?: a| an)?|recruiting)(?: a| an)? ([^:.\n]+?)(:-|[.:]|\n|$)'
+     ]
+
      job_title = "Not specified"
+     for pattern in title_patterns:
+         title_match = re.search(pattern, clean_job_text, re.IGNORECASE)
+         if title_match:
+             potential_title = title_match.group(1).strip() if len(title_match.groups()) >= 1 else title_match.group(2).strip()
+             if 3 <= len(potential_title) <= 50:
+                 job_title = potential_title.capitalize()
                  break

+     # Extract years of experience
+     exp_patterns = [
+         r'(\d+)(?:\+)?\s*(?:years|yrs)(?:\s*of)?\s*(?:experience|exp)',
+         r'experience\s*(?:of)?\s*(\d+)(?:\+)?\s*(?:years|yrs)'
+     ]
+
      years_required = 0
+     for pattern in exp_patterns:
+         exp_match = re.search(pattern, clean_job_text, re.IGNORECASE)
+         if exp_match:
              try:
+                 years_required = int(exp_match.group(1))
                  break
+             except:
+                 pass

+     # Extract required skills
+     required_skills = [skill for skill in tech_skills if re.search(r'\b' + re.escape(skill.lower()) + r'\b', clean_job_text)]

      # Fallback if no skills found
      if not required_skills:
+         words = re.findall(r'\b\w{4,}\b', clean_job_text)
          word_counts = {}
+         for word in words:
+             if word not in ["with", "that", "this", "have", "from", "they", "will", "what", "your", "their", "about"]:
+                 word_counts[word] = word_counts.get(word, 0) + 1
+         sorted_words = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)
+         required_skills = [word.capitalize() for word, _ in sorted_words[:5]]
+
+     job_summary = summarize_text(job_description, models, max_length=100)

      return {
          "title": job_title,
          "years_experience": years_required,
          "required_skills": required_skills,
+         "summary": job_summary
      }

  def evaluate_job_fit(resume_summary, job_requirements, models):
+     """Evaluate how well a resume matches job requirements"""
+     start_time = time.time()

+     # Extract information
      required_skills = job_requirements["required_skills"]
      years_required = job_requirements["years_experience"]
      job_title = job_requirements["title"]
      skills_mentioned = extract_skills(resume_summary)

+     # Calculate match percentages
      matching_skills = [skill for skill in required_skills if skill in skills_mentioned]
+     skill_match_percentage = len(matching_skills) / len(required_skills) if required_skills else 0

+     # Extract experience level from resume
+     experience_pattern = r'(\d+)\+?\s*years?\s*(?:of)?\s*experience'
      years_experience = 0
+     experience_match = re.search(experience_pattern, resume_summary, re.IGNORECASE)
+     if experience_match:
+         try:
+             years_experience = int(experience_match.group(1))
+         except:
+             pass

+     # Calculate match scores
      exp_match_ratio = min(1.0, years_experience / max(1, years_required)) if years_required > 0 else 0.5

+     # Job title match score
+     title_words = [word for word in job_title.lower().split() if len(word) > 3]
+     title_matches = sum(1 for word in title_words if word in resume_summary.lower())
+     title_match = title_matches / len(title_words) if title_words else 0
+
+     # Calculate individual scores
+     skill_score = min(2, skill_match_percentage * 3)
      exp_score = min(2, exp_match_ratio * 2)
      title_score = min(2, title_match * 2)

      # Extract candidate info
+     name_match = re.search(r'Name:\s*(.*?)(?=\n|\Z)', resume_summary)
+     name = name_match.group(1).strip() if name_match else "The candidate"

+     industry_match = re.search(r'Expected Industry:\s*(.*?)(?=\n|\Z)', resume_summary)
+     industry = industry_match.group(1).strip() if industry_match else "unspecified industry"

+     # Calculate final weighted score
      weighted_score = (skill_score * 0.5) + (exp_score * 0.3) + (title_score * 0.2)

+     # Determine fit score
+     if weighted_score >= 1.5:
+         fit_score = 2  # Good fit
+     elif weighted_score >= 0.8:
+         fit_score = 1  # Potential fit
+     else:
+         fit_score = 0  # Not a fit
+
+     # Generate assessment text
+     missing_skills = [skill for skill in required_skills if skill not in skills_mentioned]

      if fit_score == 2:
+         fit_assessment = f"{fit_score}: GOOD FIT - {name} demonstrates strong alignment with the {job_title} position. Their background in {industry} and professional experience appear well-suited for this role's requirements. The technical expertise matches what the position demands."
      elif fit_score == 1:
+         fit_assessment = f"{fit_score}: POTENTIAL FIT - {name} shows potential for the {job_title} role with some relevant experience, though there are gaps in certain technical areas. Their {industry} background provides partial alignment with the position requirements. Additional training might be needed in {', '.join(missing_skills[:2])} if pursuing this opportunity."
      else:
+         fit_assessment = f"{fit_score}: NO FIT - {name}'s current background shows limited alignment with this {job_title} position. Their experience level and technical background differ significantly from the role requirements. A position better matching their {industry} expertise might be more suitable."

+     return fit_assessment, fit_score, time.time() - start_time

  def analyze_job_fit(resume_summary, job_description, models):
+     """End-to-end job fit analysis"""
+     start_time = time.time()
      job_requirements = extract_job_requirements(job_description, models)
+     assessment, fit_score, execution_time = evaluate_job_fit(resume_summary, job_requirements, models)
+     return assessment, fit_score, time.time() - start_time

  #####################################
  # Main Function
  #####################################
  def main():
+     """Main function for the Streamlit application"""
      st.title("Resume-Job Fit Analyzer")
+     st.markdown("Upload your resume file in **.docx**, **.doc**, or **.txt** format and enter a job description to see how well you match with the job requirements.")

+     # Load models
      models = load_models()
+
+     # User inputs
+     uploaded_file = st.file_uploader("Upload your resume (.docx, .doc, or .txt)", type=["docx", "doc", "txt"])
      job_description = st.text_area("Enter Job Description", height=200, placeholder="Paste the job description here...")

      # Process when button clicked
+     if uploaded_file is not None and job_description and st.button("Analyze Job Fit"):
+         progress_bar = st.progress(0)
+         status_text = st.empty()

          # Step 1: Extract text
+         status_text.text("Step 1/3: Extracting text from resume...")
          resume_text = extract_text_from_file(uploaded_file)
+         progress_bar.progress(25)

          if resume_text.startswith("Error") or resume_text == "Unsupported file type. Please upload a .docx, .doc, or .txt file.":
              st.error(resume_text)
          else:
              # Step 2: Generate summary
+             status_text.text("Step 2/3: Analyzing resume and generating summary...")
+             summary, summarization_time = summarize_resume_text(resume_text, models)
+             progress_bar.progress(50)
+
+             # Display summary
              st.subheader("Your Resume Summary")
              st.markdown(summary)

+             # Step 3: Generate job fit assessment
+             status_text.text("Step 3/3: Evaluating job fit (this will take a moment)...")
+             assessment, fit_score, assessment_time = analyze_job_fit(summary, job_description, models)
+             progress_bar.progress(100)
+             status_text.empty()

              # Display results
              st.subheader("Job Fit Assessment")
+
+             # Display score with appropriate styling
              fit_labels = {0: "NOT FIT", 1: "POTENTIAL FIT", 2: "GOOD FIT"}
+             score_colors = {0: "red", 1: "orange", 2: "green"}
+             st.markdown(f"<h2 style='color: {score_colors[fit_score]};'>{fit_labels[fit_score]}</h2>", unsafe_allow_html=True)
              st.markdown(assessment)
+             st.info(f"Analysis completed in {(summarization_time + assessment_time):.2f} seconds")

              # Recommendations
              st.subheader("Recommended Next Steps")
+
              if fit_score == 2:
                  st.markdown("""
                  - Apply for this position as you appear to be a good match