Upload 4 files
- firm-capsule-436804-b5-5f553d9f1043.json +13 -0
- logo.jpeg +0 -0
- resume_generation_gemini_pro.py +188 -0
- similarity_score_refined.py +145 -0
firm-capsule-436804-b5-5f553d9f1043.json
ADDED
@@ -0,0 +1,13 @@
{
"type": "service_account",
"project_id": "firm-capsule-436804-b5",
"private_key_id": "5f553d9f10439131e44a6fd903ad024e9872344c",
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCfhRmE+Fj5zz1F\ndPPOdVsukF7aDuJf00VlsA5j0gcZHY/RoXnDaCLGGMi2hqfMoWRhszjxdVjMuLpb\n9rgYltttMQooq3hZZtWmgXpu7FnIjEg11M+XwBQlOrFE0Zo5RFmuMk8il0rdphju\nW8K9qXz1/ncsa+fFH5DLcSqv0WMkn5yJPR9mSzgVavQOtTKzatPkFYVfCoAYR9Zl\nOQmlAZjC4y53qnsXbIkw44LMKqJOTxi5eFqQo6aWS0f9l7vvN/ZKw0FU4PiVlWrj\nGMJx+gn20R6rmBdz9Y2MYl519mumC8ahxvordW8rDBntGkR6FTACgZn2Ixh9FavD\nHGyIIN/zAgMBAAECggEACFWL38aZ33NUYCycEE1RVj10ty/P4/HFzmpEVjLC7Cz/\nNpSZJ/xo1LC2y9J9MdO8yt3yO6tF2GxOUD+znpVDIZFOVpGe2AznXKunGeR/TFsH\n6sgTkHoYk1ldqZrJ0/QeO1ulhskgwDPNWaaxpfcU8EU7NLbuhP4BMp/W+xiVdXRM\nOmry2FakATF9O62mhxPGeFW1RnZrHLFEYmC6lclMoJ/JzwwltKn8tVy5nDlHzrqi\nqNNAsGZwI2+dyBI7xGEo5uBtjCKKBC00mCXs9PHzXVKlvH1gHkP7RsboOdEhRyJ2\noTD6EdcE0Qg7/O/7JahBaJjNIPdMgrDovrF5Hy0jAQKBgQDOf6MHubfXz53h36GB\n6QvvOyeNXqp7HagukgoQX/682FsuNltI8xM9hQ2XwfM3z6YzPmpOuAvzM1WixMpE\nOCKdYF8Ne+gh/WRkTVdslF/+YQXoEgL9zdhXecjACsADW9qIP5R+jD/tb58WMgK4\nMDq+sX2k9Ga1VJR74RfzlqK5/QKBgQDFwnxj/TZXUCgaxP3+MazVZjmDDdsd3i8g\nX/XHvFjPNinpnPnn6cFEitTMdtwy3i2T4EuKYG780QixzlWN8yBRIx1K1WBz/7nw\npumgatehLBLCoIb/qDM6DabaLYDxPfM0ik0nrElYdREgDe93h83zpS/KaEiJ41B7\nRu+NOzFsrwKBgQCUVJLjJcCtQPGEIoN9Mc3k1nUe03VaYn9AlJ6uDIaLWE6G73y6\n9gBIHJ8a97Kh0ILuJFNwoDvVQnatKPax8jDLI0KkCPFCGSZX9kZqNFqP+1mmzelg\nLtDMZSqS6CXtQ7nCTQl5rXzuLVP1OY9Ch0JrYEuEhhqQ/mR6Z8Iws2/IcQKBgAZi\nlSmVT1/aUrEqkxjV+cKaHllA5UY9mj/tWT4tsqBeG1RToS3/uiHAacaAP/PUgJw5\nrba2Dl9TUHiNNO30X6K/A38bDJYyN4qxmPF5AaVOivyFulYe1CiXsD9zIKqpWWTk\n4kDZqzST8w2Z5ZVyaQ/o0XYzj8QD9uFEFQ/2TU45AoGAbfdleVXy4aWBRaLuqJcR\ndqFr/EomGj1dR00pOY0qNS9AuqpI9Kt7sTkaJYVnYgrlCX2xkin1y3+c4dvnBsC0\n6wG69ztZ9BibLhhNc82PNfTQoGdh8jT+4T8GqubbC5Wi03tRiHTfXWWpX+86CW+Y\nRvlqI5p9EOBJSqwEksd+jxY=\n-----END PRIVATE KEY-----\n",
"client_email": "genai-328@firm-capsule-436804-b5.iam.gserviceaccount.com",
"client_id": "105433240521758892195",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/genai-328%40firm-capsule-436804-b5.iam.gserviceaccount.com",
"universe_domain": "googleapis.com"
}
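
This is a Google Cloud service-account key; resume_generation_gemini_pro.py below points GOOGLE_APPLICATION_CREDENTIALS at this file. A minimal sketch of consuming it, assuming the google-auth package is installed:

import os
from google.oauth2 import service_account

# Point the standard env var at the key file, as resume_generation_gemini_pro.py does.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "firm-capsule-436804-b5-5f553d9f1043.json"

# Or load the credentials object directly with google-auth.
creds = service_account.Credentials.from_service_account_file(
    "firm-capsule-436804-b5-5f553d9f1043.json"
)
print(creds.service_account_email)

Note that a private key committed to a public Space is effectively disclosed; rotating the key and supplying it as a Space secret is the usual alternative.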
logo.jpeg
ADDED
resume_generation_gemini_pro.py
ADDED
@@ -0,0 +1,188 @@
# -*- coding: utf-8 -*-
"""Resume_generation_Gemini_pro.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/16z793IRwVmvKYCaOLGZFDYj-XOj8zEJL
"""

# Colab-only setup, kept for reference:
# from google.colab import drive, userdata
# drive.mount('/content/drive')

# !pip install streamlit -qq
# !pip install PyPDF2 -qq
# !pip install langchain_community -qq
# !pip install langchain_google_genai -qq
# !pip install python-docx -qq
# !pip install docx2txt -qq
# !pip install faiss-gpu -qq
# !pip install google-generativeai -qq
# !pip install --upgrade google-generativeai -qq

import os
from datetime import datetime

import docx2txt
import PyPDF2
import streamlit as st
import google.generativeai as genai
from docx import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores.faiss import FAISS


def extract_text(file_path):
    """Return the plain text of a .docx or .pdf file."""
    if file_path.endswith(".docx"):
        # Extract text from a DOCX file
        return docx2txt.process(file_path)
    elif file_path.endswith(".pdf"):
        # Extract text page by page from a PDF file
        text = ""
        with open(file_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            for page_num in range(len(reader.pages)):
                text += reader.pages[page_num].extract_text()
        return text
    else:
        raise ValueError("Unsupported file type")

# from google.colab import auth
# auth.authenticate_user()

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "firm-capsule-436804-b5-5f553d9f1043.json"

# Read the Gemini API key from the environment rather than hardcoding it in source.
api_key_google = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=api_key_google)

model = genai.GenerativeModel('gemini-pro')


def save_resume_to_docx(tailored_resume, file_path):
    """Write the tailored resume text to a .docx file."""
    doc = Document()
    doc.add_heading('Tailored Resume', level=1)
    doc.add_paragraph(tailored_resume)
    doc.save(file_path)


# Function to read text from a .docx file
def read_docx(file_path):
    doc = Document(file_path)
    return "\n".join([para.text for para in doc.paragraphs])


def generate_resume_text(resume_text):
    """Ask Gemini to reformat the raw resume text with clean section titles."""
    prompt = f"""
    Given the following resume content:

    [Resume Start]
    {resume_text}
    [Resume End]

    Format this resume content with appropriate section titles. Only use the information provided and avoid placeholders like "[Your Name]". Ensure it retains the structure and details exactly as shown.
    """
    try:
        response = model.generate_content(prompt)
        # Access the generated text content
        return response.candidates[0].content.parts[0].text
    except Exception as e:
        print("Error in generating resume text:", e)
        return None


def tailor_resume(resume_text, job_description):
    # First reformat the resume, then tailor it to the job description
    formatted_resume = generate_resume_text(resume_text)
    if formatted_resume:
        prompt = f"""
        Below is the candidate's original formatted resume content:

        [Resume Start]
        {formatted_resume}
        [Resume End]

        Using the candidate's resume above and the job description below, create a tailored resume.

        [Job Description Start]
        {job_description}
        [Job Description End]

        Please generate a resume that:
        1. Uses real data from the candidate's resume, including name and education.
        2. Avoids placeholders like "[Your Name]" and includes actual details.
        3. In the experience section, emphasizes professional experiences and skills that are directly relevant to the job description.
        4. Keeps at most the top three accomplishments/responsibilities for each job position held, so the candidate stands out in the new job role.
        5. Removes special characters from the section titles.
        6. Only includes publications if the job description is research-based.
        7. Summarizes the skills and technical-skills sections into a brief profile.
        8. Does not include courses, certifications, references, skills, or technical-skills sections.
        """
        try:
            response = model.generate_content(prompt)
            return response.candidates[0].content.parts[0].text
        except Exception as e:
            print("Error in tailoring resume:", e)
            return None
    else:
        return "Failed to generate resume text."


# Entry function for the model
def generate_gemini(current_resume, job_description):
    st.header('Resume Tailoring')

    # Load the resume and job description
    resume_text = extract_text(current_resume)
    job_description = extract_text(job_description)

    # Tailor the resume based on the job description
    tailored_resume = tailor_resume(resume_text, job_description)
    st.write("**Tailored Resume:**")
    st.write(tailored_resume)

    # Save the tailored resume to a timestamped .docx file
    file_path = None
    if tailored_resume:
        file_path = f"Tailored_Resume_{datetime.now().strftime('%Y%m%d_%H%M%S')}.docx"
        save_resume_to_docx(tailored_resume, file_path)
        st.success("Download tailored resume")

    return tailored_resume, file_path
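
A minimal driver for the file above, assuming it is importable as resume_generation_gemini_pro and the input documents sit alongside it (the file names below are hypothetical; extract_text accepts .pdf and .docx):

from resume_generation_gemini_pro import generate_gemini

tailored_resume, docx_path = generate_gemini("my_resume.pdf", "job_description.docx")
print(docx_path)  # e.g. Tailored_Resume_20250101_093000.docx

Because generate_gemini calls Streamlit widgets (st.header, st.write, st.success), it is meant to run inside a `streamlit run` session; outside one it still executes but logs ScriptRunContext warnings.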
similarity_score_refined.py
ADDED
@@ -0,0 +1,145 @@
# -*- coding: utf-8 -*-
"""Similarity_score_refined (2).ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1c8mlCBnLbduLsI8rUGFEOYDuyBqdz2JJ
"""

# !pip install sentence_transformers
# !pip install openai==0.28
# !pip install docx2txt PyPDF2 transformers

# Colab-only setup, kept for reference:
# from google.colab import drive, userdata
# drive.mount("/content/drive")
# print("Google Drive mounted.")

import os
import re

from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

# Ensure the stopwords and wordnet corpora are available
import nltk
nltk.download('stopwords')
nltk.download('wordnet')


def extract_text(file_path):
    """Return the plain text of a .docx or .pdf file."""
    import docx2txt
    import PyPDF2
    if file_path.endswith(".docx"):
        # Extract text from a DOCX file
        return docx2txt.process(file_path)
    elif file_path.endswith(".pdf"):
        # Extract text page by page from a PDF file
        text = ""
        with open(file_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            for page_num in range(len(reader.pages)):
                text += reader.pages[page_num].extract_text()
        return text
    else:
        raise ValueError("Unsupported file type")


def preprocess(text):
    """Lowercase, strip non-letters, remove stop words, and lemmatize."""
    # Lowercase the text
    text = text.lower()

    # Remove special characters and numbers
    text = re.sub(r'[^a-z\s]', '', text)

    # Tokenize the text by splitting on whitespace
    words = text.split()

    # Remove stop words
    stop_words = set(stopwords.words('english'))
    words = [word for word in words if word not in stop_words]

    # Lemmatize the words (to get the root form)
    lemmatizer = WordNetLemmatizer()
    words = [lemmatizer.lemmatize(word) for word in words]

    # Join the words back into a single string
    return ' '.join(words)


def calculate_tfidf(doc):
    """Keep only the terms whose TF-IDF weight in this document exceeds 0.2."""
    vectorizer = TfidfVectorizer()
    tfidf_matrix = vectorizer.fit_transform([doc])  # Fit on the individual document only
    feature_names = vectorizer.get_feature_names_out()
    dense_tfidf_matrix = tfidf_matrix.todense()

    # Extract important terms from the document with a threshold
    important_terms = [feature_names[i] for i in range(len(feature_names)) if dense_tfidf_matrix[0, i] > 0.2]

    return ' '.join(important_terms)


def call_chatgpt_api(prompt, api_key, model="gpt-3.5-turbo"):
    """Send a single-prompt chat completion request (openai==0.28 API)."""
    import openai
    openai.api_key = api_key
    response = openai.ChatCompletion.create(
        model=model,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ],
        max_tokens=500,
        temperature=0,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    return response['choices'][0]['message']['content'].strip()


def calculate_similarity(resume, job_desc, model_name="sentence-transformers/all-MiniLM-L6-v2"):
    """Cosine similarity between sentence-transformer embeddings of two texts."""
    from sentence_transformers import SentenceTransformer, util
    model = SentenceTransformer(model_name)

    # Convert the texts to embeddings
    embeddings1 = model.encode(resume, convert_to_tensor=True)
    embeddings2 = model.encode(job_desc, convert_to_tensor=True)

    # Calculate cosine similarity
    similarity_score = util.pytorch_cos_sim(embeddings1, embeddings2)
    return similarity_score.item()  # return as a scalar


def similarity_main(resume_path, job_description_path):
    # Extract text from the two files
    resume_text = extract_text(resume_path)
    job_des = extract_text(job_description_path)

    # Read the OpenAI API key from the environment rather than hardcoding it in source.
    api_key = os.environ.get("OPENAI_API_KEY")

    # Extract sections from the resume
    skills_prompt = f"Extract the skills or competencies section from the resume. Avoid using the name of the candidate:\n\n{resume_text}"
    resume_skills = call_chatgpt_api(skills_prompt, api_key)
    experience_prompt = f"Extract the experience of the candidate from the resume. Avoid using the name of the candidate:\n\n{resume_text}"
    resume_experience = call_chatgpt_api(experience_prompt, api_key)

    # Extract sections from the job description (JD)
    jd_skills_prompt = f"Extract the skills section from the job description:\n\n{job_des}"
    jd_skills = call_chatgpt_api(jd_skills_prompt, api_key)
    jd_experience_prompt = f"Extract the experience section from the job description:\n\n{job_des}"
    jd_experience = call_chatgpt_api(jd_experience_prompt, api_key)

    # Clean both sides before comparing
    resume_skills_clean = preprocess(resume_skills)
    jd_skills_clean = preprocess(jd_skills)
    resume_experience_clean = preprocess(resume_experience)
    jd_experience_clean = preprocess(jd_experience)

    # Compare the TF-IDF-filtered skills sections
    filtered_resume = calculate_tfidf(resume_skills_clean)
    filtered_jd = calculate_tfidf(jd_skills_clean)
    similarity_skills = calculate_similarity(filtered_resume, filtered_jd)

    # Compare the TF-IDF-filtered experience sections
    filtered_resume_ex = calculate_tfidf(resume_experience_clean)
    filtered_jd_ex = calculate_tfidf(jd_experience_clean)
    similarity_ex = calculate_similarity(filtered_resume_ex, filtered_jd_ex)

    # Average the two scores and report as a percentage
    average_score = (similarity_skills + similarity_ex) / 2
    percentage = f"{average_score * 100:.2f}%"
    return percentage
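
A matching sketch for the scorer, assuming OPENAI_API_KEY is exported in the shell and using the same hypothetical input files; the returned value averages the skills and experience cosine similarities and formats the result as a percentage string:

from similarity_score_refined import similarity_main

score = similarity_main("my_resume.pdf", "job_description.docx")
print("Resume/JD match:", score)  # e.g. "Resume/JD match: 72.45%"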