root committed on
Commit
653e0d6
·
1 Parent(s): 23bdad9
Files changed (3) hide show
  1. app.py +6 -1
  2. explanation_generator.py +2 -0
  3. requirements.txt +1 -0
app.py CHANGED
@@ -19,6 +19,8 @@ import PyPDF2
19
  from docx import Document
20
  import csv
21
  from explanation_generator import ExplanationGenerator
 
 
22
 
23
  # Download NLTK resources
24
  try:
@@ -366,8 +368,11 @@ class ResumeScreener:
366
  bm25_scores = self.calculate_bm25_scores(resume_texts, job_description)
367
 
368
  # Normalize BM25 scores
369
- if max(bm25_scores) > 0:
370
  bm25_scores = [score / max(bm25_scores) for score in bm25_scores]
 
 
 
371
 
372
  # Calculate hybrid scores
373
  keyword_weight = 1.0 - semantic_weight
 
19
  from docx import Document
20
  import csv
21
  from explanation_generator import ExplanationGenerator
22
+ from einops.layers.torch import Rearrange, Reduce
23
+ from einops import rearrange, reduce, repeat
24
 
25
  # Download NLTK resources
26
  try:
 
368
  bm25_scores = self.calculate_bm25_scores(resume_texts, job_description)
369
 
370
  # Normalize BM25 scores
371
+ if bm25_scores and len(bm25_scores) > 0 and max(bm25_scores) > 0:
372
  bm25_scores = [score / max(bm25_scores) for score in bm25_scores]
373
+ else:
374
+ # If BM25 scores are empty or all zero, use neutral values
375
+ bm25_scores = [0.5] * len(resume_texts)
376
 
377
  # Calculate hybrid scores
378
  keyword_weight = 1.0 - semantic_weight
explanation_generator.py CHANGED
@@ -9,6 +9,8 @@ import torch
9
  from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
10
  import os
11
  import re
 
 
12
 
13
  # Load QwQ model at initialization time
14
  print("Loading Qwen/QwQ-32B model with 4-bit quantization...")
 
9
  from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
10
  import os
11
  import re
12
+ from einops.layers.torch import Rearrange, Reduce
13
+ from einops import rearrange, reduce, repeat
14
 
15
  # Load QwQ model at initialization time
16
  print("Loading Qwen/QwQ-32B model with 4-bit quantization...")
requirements.txt CHANGED
@@ -18,3 +18,4 @@ huggingface-hub>=0.30.0,<1.0
18
  einops
19
  bitsandbytes>=0.41.0
20
  accelerate>=0.23.0
 
 
18
  einops
19
  bitsandbytes>=0.41.0
20
  accelerate>=0.23.0
21
+ replicate==0.17.0