Spaces:
Sleeping
Sleeping
Anushkabhat9
commited on
Commit
·
92adbc4
1
Parent(s):
5c7154d
all
Browse files- app.py +49 -0
- firm-capsule-436804-b5-5f553d9f1043.json +13 -0
- requirements.txt +8 -0
- resume_generation_gemini_pro.py +159 -0
app.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
"""Untitled27.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/16VnJw-SMttFaPZi8gMJTp4k7YDqL-rP0
"""

import os

import streamlit as st

# BUG FIX: module names are case-sensitive on Linux. The file added in this
# commit is `resume_generation_gemini_pro.py`, so the import must use that
# exact casing — `Resume_generation_Gemini_pro` raises ModuleNotFoundError
# on any case-sensitive filesystem (e.g. the HF Spaces host).
from resume_generation_gemini_pro import Gemini_pro_main  # tailoring entry point
|
13 |
+
|
14 |
+
# Helper function to save uploaded files temporarily and return their paths
|
15 |
+
def save_uploaded_file(uploaded_file):
    """Persist a Streamlit upload to the system temp dir and return its path.

    The filename comes from the browser and is untrusted, so only its
    basename is used — a crafted name such as ``../../etc/cron.d/x`` cannot
    escape the temp directory.
    """
    import tempfile  # local import so this fix is self-contained

    # tempfile.gettempdir() instead of a hard-coded "/tmp" keeps this working
    # on Windows and on hosts with a relocated temp directory (same path on
    # Linux, so behavior is unchanged there).
    file_path = os.path.join(tempfile.gettempdir(),
                             os.path.basename(uploaded_file.name))

    # Write the uploaded file content to the path.
    with open(file_path, "wb") as f:
        f.write(uploaded_file.getbuffer())

    return file_path
|
24 |
+
|
25 |
+
# Streamlit UI layout
st.title("Resume Tailoring with Google Generative AI")
st.write("Upload your current resume and a job description to generate a tailored resume.")

# Collect the two input documents from the user.
uploaded_resume = st.file_uploader("Upload Current Resume (.docx or .pdf)", type=["docx", "pdf"], key="resume")
uploaded_job_description = st.file_uploader("Upload Job Description (.docx or .pdf)", type=["docx", "pdf"], key="job_description")

# Guard clause: nothing to do until both documents are present.
if uploaded_resume is None or uploaded_job_description is None:
    st.warning("Please upload both the current resume and job description files.")
else:
    # Persist both uploads to disk so the model module can read them by path.
    resume_path = save_uploaded_file(uploaded_resume)
    job_description_path = save_uploaded_file(uploaded_job_description)
    st.write(f"Files saved at: {resume_path} and {job_description_path}")

    if st.button("Generate Tailored Resume"):
        with st.spinner("Generating resume with Google Generative AI..."):
            # Delegate generation (and on-page rendering) to the model module.
            Gemini_pro_main(resume_path, job_description_path)

        # Display the generated tailored resume
        # st.subheader("Generated Tailored Resume:")
        # st.write(generated_resume)
|
firm-capsule-436804-b5-5f553d9f1043.json
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"type": "service_account",
|
3 |
+
"project_id": "firm-capsule-436804-b5",
|
4 |
+
"private_key_id": "5f553d9f10439131e44a6fd903ad024e9872344c",
|
5 |
+
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCfhRmE+Fj5zz1F\ndPPOdVsukF7aDuJf00VlsA5j0gcZHY/RoXnDaCLGGMi2hqfMoWRhszjxdVjMuLpb\n9rgYltttMQooq3hZZtWmgXpu7FnIjEg11M+XwBQlOrFE0Zo5RFmuMk8il0rdphju\nW8K9qXz1/ncsa+fFH5DLcSqv0WMkn5yJPR9mSzgVavQOtTKzatPkFYVfCoAYR9Zl\nOQmlAZjC4y53qnsXbIkw44LMKqJOTxi5eFqQo6aWS0f9l7vvN/ZKw0FU4PiVlWrj\nGMJx+gn20R6rmBdz9Y2MYl519mumC8ahxvordW8rDBntGkR6FTACgZn2Ixh9FavD\nHGyIIN/zAgMBAAECggEACFWL38aZ33NUYCycEE1RVj10ty/P4/HFzmpEVjLC7Cz/\nNpSZJ/xo1LC2y9J9MdO8yt3yO6tF2GxOUD+znpVDIZFOVpGe2AznXKunGeR/TFsH\n6sgTkHoYk1ldqZrJ0/QeO1ulhskgwDPNWaaxpfcU8EU7NLbuhP4BMp/W+xiVdXRM\nOmry2FakATF9O62mhxPGeFW1RnZrHLFEYmC6lclMoJ/JzwwltKn8tVy5nDlHzrqi\nqNNAsGZwI2+dyBI7xGEo5uBtjCKKBC00mCXs9PHzXVKlvH1gHkP7RsboOdEhRyJ2\noTD6EdcE0Qg7/O/7JahBaJjNIPdMgrDovrF5Hy0jAQKBgQDOf6MHubfXz53h36GB\n6QvvOyeNXqp7HagukgoQX/682FsuNltI8xM9hQ2XwfM3z6YzPmpOuAvzM1WixMpE\nOCKdYF8Ne+gh/WRkTVdslF/+YQXoEgL9zdhXecjACsADW9qIP5R+jD/tb58WMgK4\nMDq+sX2k9Ga1VJR74RfzlqK5/QKBgQDFwnxj/TZXUCgaxP3+MazVZjmDDdsd3i8g\nX/XHvFjPNinpnPnn6cFEitTMdtwy3i2T4EuKYG780QixzlWN8yBRIx1K1WBz/7nw\npumgatehLBLCoIb/qDM6DabaLYDxPfM0ik0nrElYdREgDe93h83zpS/KaEiJ41B7\nRu+NOzFsrwKBgQCUVJLjJcCtQPGEIoN9Mc3k1nUe03VaYn9AlJ6uDIaLWE6G73y6\n9gBIHJ8a97Kh0ILuJFNwoDvVQnatKPax8jDLI0KkCPFCGSZX9kZqNFqP+1mmzelg\nLtDMZSqS6CXtQ7nCTQl5rXzuLVP1OY9Ch0JrYEuEhhqQ/mR6Z8Iws2/IcQKBgAZi\nlSmVT1/aUrEqkxjV+cKaHllA5UY9mj/tWT4tsqBeG1RToS3/uiHAacaAP/PUgJw5\nrba2Dl9TUHiNNO30X6K/A38bDJYyN4qxmPF5AaVOivyFulYe1CiXsD9zIKqpWWTk\n4kDZqzST8w2Z5ZVyaQ/o0XYzj8QD9uFEFQ/2TU45AoGAbfdleVXy4aWBRaLuqJcR\ndqFr/EomGj1dR00pOY0qNS9AuqpI9Kt7sTkaJYVnYgrlCX2xkin1y3+c4dvnBsC0\n6wG69ztZ9BibLhhNc82PNfTQoGdh8jT+4T8GqubbC5Wi03tRiHTfXWWpX+86CW+Y\nRvlqI5p9EOBJSqwEksd+jxY=\n-----END PRIVATE KEY-----\n",
|
6 |
+
"client_email": "[email protected]",
|
7 |
+
"client_id": "105433240521758892195",
|
8 |
+
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
|
9 |
+
"token_uri": "https://oauth2.googleapis.com/token",
|
10 |
+
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
|
11 |
+
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/genai-328%40firm-capsule-436804-b5.iam.gserviceaccount.com",
|
12 |
+
"universe_domain": "googleapis.com"
|
13 |
+
}
|
requirements.txt
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
streamlit
PyPDF2
# NOTE(review): `langchain` added — resume_generation_gemini_pro.py imports
# `langchain.text_splitter`, which `langchain_community` alone does not provide.
langchain
langchain_community
langchain_google_genai
python-docx
docx2txt
# NOTE(review): faiss-gpu wheels are unavailable on CPU-only hosts such as
# HF Spaces; faiss-cpu is probably intended — confirm before deploying.
faiss-gpu
google-generativeai
|
resume_generation_gemini_pro.py
ADDED
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
"""Resume_generation_Gemini_pro.ipynb
|
3 |
+
|
4 |
+
Automatically generated by Colab.
|
5 |
+
|
6 |
+
Original file is located at
|
7 |
+
https://colab.research.google.com/drive/16z793IRwVmvKYCaOLGZFDYj-XOj8zEJL
|
8 |
+
"""
|
9 |
+
|
10 |
+
# from google.colab import drive,userdata
|
11 |
+
|
12 |
+
# drive.mount('/content/drive')
|
13 |
+
|
14 |
+
# !pip install streamlit -qq
|
15 |
+
|
16 |
+
# !pip install PyPDF2 -qq
|
17 |
+
|
18 |
+
# !pip install langchain_community -qq
|
19 |
+
|
20 |
+
# !pip install langchain_google_genai -qq
|
21 |
+
|
22 |
+
# !pip install python-docx -qq
|
23 |
+
|
24 |
+
# !pip install docx2txt -qq
|
25 |
+
|
26 |
+
# !pip install faiss-gpu -qq
|
27 |
+
|
28 |
+
# !pip install google-generativeai -qq
|
29 |
+
|
30 |
+
# !pip install --upgrade google-generativeai -qq
|
31 |
+
|
32 |
+
import docx2txt
|
33 |
+
def extract_text(file_path):
    """Return the plain text of a .docx or .pdf file.

    Args:
        file_path: path whose extension selects the extraction backend.

    Raises:
        ValueError: if the file is neither ``.docx`` nor ``.pdf``.
    """
    if file_path.endswith(".docx"):
        # Extract text from DOCX file
        return docx2txt.process(file_path)

    if file_path.endswith(".pdf"):
        # BUG FIX: PyPDF2 was used here without ever being imported anywhere
        # in this module, so every PDF upload died with a NameError. Import
        # it lazily where it is actually needed.
        import PyPDF2

        # Concatenate the extracted text of every page.
        text = ""
        with open(file_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            for page in reader.pages:
                text += page.extract_text()
        return text

    raise ValueError("Unsupported file type")
|
49 |
+
|
50 |
+
# from google.colab import auth
|
51 |
+
# auth.authenticate_user()
|
52 |
+
|
53 |
+
import os

# SECURITY NOTE(review): this points Google auth at a service-account key
# file that is committed to this repository — that private key is public and
# must be rotated immediately; the credential should come from a Space
# secret / secrets manager, never from a tracked file.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "firm-capsule-436804-b5-5f553d9f1043.json"
|
55 |
+
|
56 |
+
# !pip install python-docx
|
57 |
+
|
58 |
+
import os
from datetime import datetime

import streamlit as st
import google.generativeai as genai
from docx import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores.faiss import FAISS

# BUG FIX: `from google.colab import drive` was removed — google.colab only
# exists inside Colab, so the import crashed the app on any other host; the
# drive.mount(...) call it served is already commented out below.

# SECURITY: this API key was committed in plain text and must be rotated.
# Prefer the GOOGLE_API_KEY environment variable; the literal remains only
# as a backward-compatible fallback until the key is revoked.
api_key_google = os.environ.get("GOOGLE_API_KEY", 'AIzaSyC8rXXpyVnAnnMG1rxPOF0JpWWPnCH1h_Y')
genai.configure(api_key=api_key_google)

# Mount Google Drive
# drive.mount('/content/drive')

# Single shared Gemini model instance used by the generation helpers below.
model = genai.GenerativeModel('gemini-pro')
|
74 |
+
|
75 |
+
def save_resume_to_docx(tailored_resume, file_path):
    """Write *tailored_resume* to a .docx document at *file_path*."""
    document = Document()
    document.add_heading('Tailored Resume', level=1)
    document.add_paragraph(tailored_resume)
    document.save(file_path)
|
80 |
+
|
81 |
+
# Function to read text from a .docx file
|
82 |
+
# Function to read text from a .docx file
def read_docx(file_path):
    """Return the document's text, one paragraph per line."""
    paragraphs = Document(file_path).paragraphs
    return "\n".join(para.text for para in paragraphs)
|
85 |
+
|
86 |
+
def generate_resume_text(resume_text):
    """Ask Gemini to reformat raw resume text with proper section titles.

    Args:
        resume_text: the resume content as extracted plain text.

    Returns:
        The model's formatted resume text, or None if the API call fails.
    """
    prompt = f"""
    Given the following resume content:

    [Resume Start]
    {resume_text}
    [Resume End]

    Format this resume content with appropriate section titles. Only use the information provided and avoid placeholders like "[Your Name]". Ensure it retains the structure and details exactly as shown.
    """
    try:
        response = model.generate_content(prompt)
        print(response)
        # Accessing the generated text content
        return response.candidates[0].content.parts[0].text
    except Exception as e:
        # Best-effort: any API/parsing failure is logged and reported as None.
        print("Error in generating resume text:", e)
        return None
|
104 |
+
|
105 |
+
def tailor_resume(resume_text, job_description):
    """Generate a resume tailored to *job_description* from *resume_text*.

    Two-step pipeline: first normalize the resume via generate_resume_text(),
    then prompt the model to tailor the normalized version to the job.

    Returns:
        The tailored resume text; None if the tailoring API call fails; or
        the string "Failed to generate resume text." when the formatting
        step itself returned nothing.
    """
    # Use the generate_resume_text function to get the formatted resume content
    formatted_resume = generate_resume_text(resume_text)
    print("formatted resume:",formatted_resume)
    if formatted_resume:
        prompt = f"""
        Below is the candidate's original formatted resume content:

        [Resume Start]
        {formatted_resume}
        [Resume End]

        Using the candidate's resume above and the job description below, create a tailored resume.

        [Job Description Start]
        {job_description}
        [Job Description End]

        Please generate a resume that:
        1. Uses real data from the candidate's resume, including name, education, and specific skills.
        2. Avoids placeholders like "[Your Name]" and includes actual details.
        3. Emphasizes experiences that are directly relevant to the job description.
        """
        try:
            response = model.generate_content(prompt)
            return response.candidates[0].content.parts[0].text
        except Exception as e:
            # Best-effort: log and signal failure with None.
            print("Error in tailoring resume:", e)
            return None
    else:
        # NOTE(review): callers treat any truthy return as resume text, so
        # this error string will be rendered as if it were the resume.
        return "Failed to generate resume text."
|
136 |
+
|
137 |
+
# Main function for Streamlit app
|
138 |
+
# Main function for Streamlit app
def Gemini_pro_main(current_resume, job_description):
    """Streamlit flow: tailor *current_resume* to *job_description*.

    Both arguments are file paths (.docx or .pdf). Renders the tailored
    resume on the page and saves it to a timestamped .docx file.
    """
    st.header('Resume Tailoring')

    # BUG FIX: app.py accepts .pdf uploads, but read_docx() only handles
    # .docx (python-docx raises on PDFs) — use this module's extract_text(),
    # which supports both formats. Also stop shadowing the parameter.
    resume_text = extract_text(current_resume)
    job_description_text = extract_text(job_description)

    # Tailor resume based on job description
    tailored_resume = tailor_resume(resume_text, job_description_text)
    st.write("**Tailored Resume:**")
    st.write(tailored_resume)
    print(tailored_resume)

    # Save the tailored resume to a .docx file (skipped when generation failed).
    if tailored_resume:
        file_path = f"Tailored_Resume_{datetime.now().strftime('%Y%m%d_%H%M%S')}.docx"
        save_resume_to_docx(tailored_resume, file_path)
        st.success(f"Tailored resume saved to {file_path}")
|
156 |
+
|
157 |
+
# if __name__ == '__main__':
|
158 |
+
# main()
|
159 |
+
|