# -*- coding: utf-8 -*-
"""Resume_generation_Gemini_pro.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/16z793IRwVmvKYCaOLGZFDYj-XOj8zEJL
"""
# from google.colab import drive,userdata
# drive.mount('/content/drive')
# !pip install streamlit -qq
# !pip install PyPDF2 -qq
# !pip install langchain_community -qq
# !pip install langchain_google_genai -qq
# !pip install python-docx -qq
# !pip install docx2txt -qq
# !pip install faiss-gpu -qq
# !pip install google-generativeai -qq
# !pip install --upgrade google-generativeai -qq
import docx2txt
import PyPDF2
def extract_text(file_path):
    """Extract plain text from a .docx or .pdf file."""
    if file_path.endswith(".docx"):
        # Extract text from a DOCX file
        return docx2txt.process(file_path)
    elif file_path.endswith(".pdf"):
        # Extract text from a PDF file, page by page
        text = ""
        with open(file_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            for page in reader.pages:
                # extract_text() can return None for pages without extractable text
                text += page.extract_text() or ""
        return text
    else:
        raise ValueError("Unsupported file type")
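# Minimal sketch of how extract_text can be called; the file names below are
# illustrative placeholders, not files shipped with this Space.
# resume_text = extract_text("resume.docx")
# job_text = extract_text("job_description.pdf")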
# from google.colab import auth
# auth.authenticate_user()
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "firm-capsule-436804-b5-5f553d9f1043.json"
# !pip install python-docx
import os
import streamlit as st
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores.faiss import FAISS
# from google.colab import drive
from docx import Document
import google.generativeai as genai
from datetime import datetime
# NOTE: the API key is hardcoded here; in practice it would typically be read
# from an environment variable or the Space's secrets configuration.
api_key_google = 'AIzaSyC8rXXpyVnAnnMG1rxPOF0JpWWPnCH1h_Y'
genai.configure(api_key=api_key_google)
# Mount Google Drive
# drive.mount('/content/drive')
model = genai.GenerativeModel('gemini-pro')
def save_resume_to_docx(tailored_resume, file_path):
    """Write the tailored resume text to a .docx file."""
    doc = Document()
    doc.add_heading('Tailored Resume', level=1)
    doc.add_paragraph(tailored_resume)
    doc.save(file_path)
# Function to read text from a .docx file
def read_docx(file_path):
    doc = Document(file_path)
    return "\n".join([para.text for para in doc.paragraphs])
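# Round-trip sketch for the .docx helpers above (the output file name is illustrative):
# save_resume_to_docx("Sample resume body", "sample_resume.docx")
# print(read_docx("sample_resume.docx"))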
def generate_resume_text(resume_text):
    prompt = f"""
    Given the following resume content:
    [Resume Start]
    {resume_text}
    [Resume End]
    Format this resume content with appropriate section titles. Only use the information provided and avoid placeholders like "[Your Name]". Ensure it retains the structure and details exactly as shown.
    """
    try:
        response = model.generate_content(prompt)
        print(response)
        # Access the generated text content
        return response.candidates[0].content.parts[0].text
    except Exception as e:
        print("Error in generating resume text:", e)
        return None
def tailor_resume(resume_text, job_description):
    # Use the generate_resume_text function to get the formatted resume content
    formatted_resume = generate_resume_text(resume_text)
    print("formatted resume:", formatted_resume)
    if formatted_resume:
        prompt = f"""
        Below is the candidate's original formatted resume content:
        [Resume Start]
        {formatted_resume}
        [Resume End]
        Using the candidate's resume above and the job description below, create a tailored resume.
        [Job Description Start]
        {job_description}
        [Job Description End]
        Please generate a resume that:
        1. Uses real data from the candidate's resume, including name, education, and specific skills.
        2. Avoids placeholders like "[Your Name]" and includes actual details.
        3. Emphasizes experiences that are directly relevant to the job description.
        """
        try:
            response = model.generate_content(prompt)
            return response.candidates[0].content.parts[0].text
        except Exception as e:
            print("Error in tailoring resume:", e)
            return None
    else:
        return "Failed to generate resume text."
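# Illustrative call with toy inputs; a real run needs a valid Gemini API key
# and network access to the gemini-pro model.
# tailored = tailor_resume(
#     "Jane Doe. Python developer, 5 years of backend and data experience.",
#     "Hiring a backend engineer with strong Python and API design skills.",
# )
# print(tailored)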
# Entry function for the model
def generate_gemini(current_resume, job_description):
    st.header('Resume Tailoring')
    # Load the resume and job description from the provided file paths
    resume_text = extract_text(current_resume)
    job_description = extract_text(job_description)
    # Tailor the resume based on the job description
    tailored_resume = tailor_resume(resume_text, job_description)
    st.write("**Tailored Resume:**")
    st.write(tailored_resume)
    print(tailored_resume)
    # Save the tailored resume to a .docx file
    if tailored_resume:
        file_path = f"Tailored_Resume_{datetime.now().strftime('%Y%m%d_%H%M%S')}.docx"
        save_resume_to_docx(tailored_resume, file_path)
        st.success(f"Tailored resume saved to {file_path}")
    return tailored_resume
# Main function for Streamlit app
# def Gemini_pro_main(current_resume, job_description):
#     st.header('Resume Tailoring')
#     # Load the resume and job description from Google Drive
#     resume_text = extract_text(current_resume)
#     job_description = extract_text(job_description)
#     # Tailor resume based on job description
#     tailored_resume = tailor_resume(resume_text, job_description)
#     st.write("**Tailored Resume:**")
#     st.write(tailored_resume)
#     print(tailored_resume)
#     # Save the tailored resume to a .docx file
#     if tailored_resume:
#         file_path = f"Tailored_Resume_{datetime.now().strftime('%Y%m%d_%H%M%S')}.docx"
#         save_resume_to_docx(tailored_resume, file_path)
#         st.success(f"Tailored resume saved to {file_path}")
# if __name__ == '__main__':
#     main()
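# Sketch of how the entry function might be driven from a Streamlit front end;
# the uploader widgets and temporary file handling are assumptions, not part of
# the original app.
# import tempfile
#
# resume_upload = st.file_uploader("Upload resume", type=["pdf", "docx"])
# jd_upload = st.file_uploader("Upload job description", type=["pdf", "docx"])
# if resume_upload and jd_upload:
#     with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(resume_upload.name)[1]) as r, \
#          tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(jd_upload.name)[1]) as j:
#         r.write(resume_upload.read())
#         j.write(jd_upload.read())
#     generate_gemini(r.name, j.name)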