Upload 3 files
- pages/lecture_finder.py +123 -0
- pages/research_paper_finder.py +135 -0
- pages/resume_generator.py +454 -0
pages/lecture_finder.py
ADDED
@@ -0,0 +1,123 @@
import os

import streamlit as st
import googleapiclient.discovery
import googleapiclient.errors
from dotenv import load_dotenv
from datetime import timedelta

# Load environment variables
load_dotenv()

# Set up YouTube API client; the key is read from YOUTUBE_API_KEY in the
# environment (e.g. a .env file) rather than being hardcoded in the source
api_service_name = "youtube"
api_version = "v3"
DEVELOPER_KEY = os.getenv("YOUTUBE_API_KEY")
youtube = googleapiclient.discovery.build(api_service_name, api_version, developerKey=DEVELOPER_KEY)

def search_youtube(query, max_results=50):
    # Search for videos and return the raw result items
    try:
        request = youtube.search().list(
            q=query,
            type="video",
            part="id,snippet",
            maxResults=max_results,
            fields="items(id(videoId),snippet(title,description,thumbnails))"
        )
        response = request.execute()
        return response.get('items', [])
    except googleapiclient.errors.HttpError as e:
        st.error(f"An error occurred: {e}")
        return []

def get_video_details(video_id):
    # Fetch duration and view count for a single video
    try:
        request = youtube.videos().list(
            part="contentDetails,statistics",
            id=video_id,
            fields="items(contentDetails(duration),statistics(viewCount))"
        )
        response = request.execute()
        return response['items'][0] if response['items'] else None
    except googleapiclient.errors.HttpError as e:
        st.error(f"An error occurred while fetching video details: {e}")
        return None

def format_duration(duration):
    # Convert an ISO-8601 duration such as 'PT1H2M3S' to 'HH:MM:SS'
    duration = duration.replace('PT', '')
    hours = 0
    minutes = 0
    seconds = 0
    if 'H' in duration:
        hours, duration = duration.split('H')
        hours = int(hours)
    if 'M' in duration:
        minutes, duration = duration.split('M')
        minutes = int(minutes)
    if 'S' in duration:
        seconds = int(duration.replace('S', ''))
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}"

def parse_duration(duration_str):
    # Convert an 'HH:MM:SS', 'MM:SS', or 'SS' string into a timedelta
    parts = duration_str.split(':')
    if len(parts) == 3:
        return timedelta(hours=int(parts[0]), minutes=int(parts[1]), seconds=int(parts[2]))
    elif len(parts) == 2:
        return timedelta(minutes=int(parts[0]), seconds=int(parts[1]))
    else:
        return timedelta(seconds=int(parts[0]))

def main():
    st.set_page_config(page_title="S.H.E.R.L.O.C.K. Learning Assistant", page_icon="🕵️", layout="wide")

    st.sidebar.title("S.H.E.R.L.O.C.K.")
    st.sidebar.markdown("""
    **S**ystematic **H**olistic **E**ducational **R**esource for **L**earning and **O**ptimizing **C**ognitive **K**nowledge

    Enhance your cognitive abilities, memory techniques, and subject-specific knowledge with AI-powered personalized learning.
    """)

    query = st.sidebar.text_input("What would you like to learn about?", "")

    min_duration = st.sidebar.selectbox(
        "Minimum video duration",
        ["Any", "5:00", "10:00", "15:00", "30:00", "45:00", "1:00:00"],
        index=0
    )

    search_button = st.sidebar.button("Search for Learning Resources")

    st.title("Learning Resources")

    if search_button and query:
        with st.spinner("Searching for the best learning resources..."):
            results = search_youtube(query)

            if results:
                filtered_results = []
                for item in results:
                    video_id = item['id']['videoId']
                    video_details = get_video_details(video_id)

                    if video_details:
                        duration = video_details['contentDetails']['duration']
                        formatted_duration = format_duration(duration)
                        views = int(video_details['statistics']['viewCount'])

                        if min_duration == "Any" or parse_duration(formatted_duration) >= parse_duration(min_duration):
                            filtered_results.append((item, formatted_duration, views))

                if filtered_results:
                    for item, duration, views in filtered_results:
                        col1, col2 = st.columns([1, 3])
                        with col1:
                            st.image(item['snippet']['thumbnails']['medium']['url'], use_column_width=True)
                        with col2:
                            st.markdown(f"### [{item['snippet']['title']}](https://www.youtube.com/watch?v={item['id']['videoId']})")
                            st.markdown(f"**Duration:** {duration} | **Views:** {views:,}")
                            st.markdown(item['snippet']['description'])

                        st.markdown("---")
                else:
                    st.warning("No results found matching your duration criteria. Try adjusting the minimum duration or search query.")
            else:
                st.warning("No results found. Please try a different search query.")

if __name__ == "__main__":
    main()
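A quick sanity check for the duration helpers above (a standalone sketch, assuming format_duration and parse_duration are in scope, e.g. run inside this module; the sample durations are hand-picked, not API output):

# Sketch: ISO-8601 durations from the YouTube API round-trip through the helpers
from datetime import timedelta

assert format_duration("PT1H2M3S") == "01:02:03"
assert format_duration("PT15M") == "00:15:00"
assert format_duration("PT45S") == "00:00:45"
assert parse_duration("15:00") == timedelta(minutes=15)
# The minimum-duration filter in main() reduces to this comparison:
assert parse_duration(format_duration("PT30M")) >= parse_duration("15:00")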
pages/research_paper_finder.py
ADDED
@@ -0,0 +1,135 @@
import streamlit as st
import requests
from datetime import datetime
import pandas as pd
from dotenv import load_dotenv
import os

# Load environment variables
load_dotenv()

# Scopus API key, read from SCOPUS_API_KEY in the environment (e.g. a .env
# file) rather than being hardcoded in the source
SCOPUS_API_KEY = os.getenv("SCOPUS_API_KEY")

def search_scopus(query, start_year, end_year, max_results=50):
    base_url = "https://api.elsevier.com/content/search/scopus"

    params = {
        "query": query,
        "date": f"{start_year}-{end_year}",
        "count": max_results,
        "sort": "citedby-count desc",
        "field": "title,author,year,publicationName,description,citedby-count,doi,eid"
    }

    headers = {
        "X-ELS-APIKey": SCOPUS_API_KEY,
        "Accept": "application/json"
    }

    try:
        response = requests.get(base_url, params=params, headers=headers)
        response.raise_for_status()
        return response.json()["search-results"]["entry"]
    except requests.exceptions.RequestException as e:
        st.error(f"An error occurred while searching Scopus: {e}")
        return []

def format_authors(author_info):
    # Scopus may return a list of authors, a single author dict, or nothing
    if isinstance(author_info, list):
        return ", ".join([author.get("authname", "") for author in author_info])
    elif isinstance(author_info, dict):
        return author_info.get("authname", "")
    else:
        return "N/A"

def safe_get(dictionary, keys, default="N/A"):
    # Walk a nested dict along `keys`, returning `default` on any missing step
    for key in keys:
        if isinstance(dictionary, dict) and key in dictionary:
            dictionary = dictionary[key]
        else:
            return default
    return dictionary

def get_paper_link(paper):
    # Prefer a DOI link; fall back to the Scopus record page, then a dead link
    doi = safe_get(paper, ["prism:doi"])
    if doi != "N/A":
        return f"https://doi.org/{doi}"
    eid = safe_get(paper, ["eid"])
    if eid != "N/A":
        return f"https://www.scopus.com/record/display.uri?eid={eid}&origin=resultslist"
    return "#"

def main():
    st.set_page_config(page_title="S.H.E.R.L.O.C.K. Research Assistant", page_icon="🔬", layout="wide")

    st.sidebar.title("S.H.E.R.L.O.C.K.")
    st.sidebar.markdown("""
    **S**ystematic **H**olistic **E**ducational **R**esource for **L**iterature and **O**ptimizing **C**ognitive **K**nowledge

    Enhance your research capabilities with AI-powered literature search and analysis.
    """)

    query = st.sidebar.text_input("What topic would you like to research?", "")

    current_year = datetime.now().year
    start_year, end_year = st.sidebar.slider(
        "Publication Year Range",
        min_value=1900,
        max_value=current_year,
        value=(current_year - 5, current_year)
    )

    max_results = st.sidebar.slider("Maximum number of results", 10, 100, 50)

    search_button = st.sidebar.button("Search for Research Papers")

    st.title("Research Papers and Articles")

    if search_button and query:
        with st.spinner("Searching for the most relevant research papers..."):
            results = search_scopus(query, start_year, end_year, max_results)

        if results:
            papers = []
            for paper in results:
                papers.append({
                    "Title": safe_get(paper, ["dc:title"]),
                    "Authors": format_authors(safe_get(paper, ["author"])),
                    "Year": safe_get(paper, ["prism:coverDate"])[:4],
                    "Journal": safe_get(paper, ["prism:publicationName"]),
                    "Abstract": safe_get(paper, ["dc:description"]),
                    "Citations": safe_get(paper, ["citedby-count"], "0"),
                    "Link": get_paper_link(paper)
                })

            df = pd.DataFrame(papers)

            st.markdown(f"### Found {len(results)} papers on '{query}'")

            for _, paper in df.iterrows():
                with st.container():
                    col1, col2 = st.columns([3, 1])
                    with col1:
                        st.markdown(f"#### [{paper['Title']}]({paper['Link']})")
                        st.markdown(f"**Authors:** {paper['Authors']}")
                        st.markdown(f"**Published in:** {paper['Journal']} ({paper['Year']})")
                        st.markdown(f"**Abstract:** {paper['Abstract']}")
                    with col2:
                        st.metric("Citations", paper["Citations"])

                st.markdown("---")

            # Download results as CSV
            csv = df.to_csv(index=False).encode('utf-8')
            st.download_button(
                label="Download results as CSV",
                data=csv,
                file_name=f"{query.replace(' ', '_')}_research_papers.csv",
                mime="text/csv",
            )
        else:
            st.warning("No results found. Please try a different search query or adjust the year range.")

if __name__ == "__main__":
    main()
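For reference, the helpers above behave like this on a minimal Scopus-style entry (a sketch; the dict and DOI are hand-written to mirror the fields requested in search_scopus, not a real API response):

sample = {
    "dc:title": "Example Paper",
    "author": [{"authname": "A. Author"}, {"authname": "B. Author"}],
    "prism:coverDate": "2021-05-01",
    "citedby-count": "42",
    "prism:doi": "10.1000/example",  # hypothetical DOI
}
assert safe_get(sample, ["dc:title"]) == "Example Paper"
assert safe_get(sample, ["missing", "key"]) == "N/A"   # falls back to the default
assert format_authors(sample["author"]) == "A. Author, B. Author"
assert get_paper_link(sample) == "https://doi.org/10.1000/example"
assert safe_get(sample, ["prism:coverDate"])[:4] == "2021"  # how "Year" is derived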
pages/resume_generator.py
ADDED
@@ -0,0 +1,454 @@
import os
import json
from io import BytesIO
from datetime import datetime

import streamlit as st
from dotenv import load_dotenv
from docx import Document
from docx.shared import Inches, Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.enum.style import WD_STYLE_TYPE
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_CENTER, TA_JUSTIFY
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from PIL import Image as PILImage

# Load environment variables; the API key is read from AI71_API_KEY in .env
# rather than being hardcoded in the source
load_dotenv()

AI71_BASE_URL = "https://api.ai71.ai/v1/"
AI71_API_KEY = os.getenv("AI71_API_KEY")

def get_llm():
    return ChatOpenAI(
        model="tiiuae/falcon-180B-chat",
        api_key=AI71_API_KEY,
        base_url=AI71_BASE_URL,
        streaming=True,
    )

def generate_resume_content(resume_data):
    llm = get_llm()

    prompt = f"""
    Generate a highly professional and ATS-optimized resume based on the following information:

    Name: {resume_data['name']}
    Email: {resume_data['email']}
    Phone: {resume_data['phone']}
    Location: {resume_data['location']}

    Work Experience:
    {json.dumps(resume_data['work_experience'], indent=2)}

    Education:
    {json.dumps(resume_data['education'], indent=2)}

    Skills: {', '.join(resume_data['skills'])}

    Please generate a compelling professional summary and enhance the job descriptions.
    Use action verbs, quantify achievements where possible, and highlight key skills.
    Ensure the content is tailored for ATS optimization.
    The output should be in JSON format with the following structure:
    {{
        "summary": "Professional summary here",
        "work_experience": [
            {{
                "title": "Job title",
                "company": "Company name",
                "start_date": "Start date",
                "end_date": "End date",
                "description": "Enhanced job description with bullet points"
            }}
        ]
    }}
    """

    try:
        response = llm([HumanMessage(content=prompt)])
        enhanced_content = json.loads(response.content)

        resume_data['summary'] = enhanced_content['summary']
        resume_data['work_experience'] = enhanced_content['work_experience']

        return resume_data
    except Exception as e:
        st.error(f"An error occurred while generating AI content: {str(e)}")
        return resume_data

def create_docx(resume_data):
    doc = Document()

    # Styles
    styles = doc.styles
    style = styles.add_style('Name', WD_STYLE_TYPE.PARAGRAPH)
    style.font.name = 'Calibri'
    style.font.size = Pt(24)
    style.font.bold = True
    style.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER

    # Add photo if provided
    if 'photo' in resume_data and resume_data['photo']:
        image_stream = BytesIO(resume_data['photo'])
        doc.add_picture(image_stream, width=Inches(2.0))

    # Add name
    doc.add_paragraph(resume_data['name'], style='Name')

    # Add contact information
    contact_info = doc.add_paragraph()
    contact_info.alignment = WD_ALIGN_PARAGRAPH.CENTER
    contact_info.add_run(f"{resume_data['email']} | {resume_data['phone']} | {resume_data['location']}")

    # Add summary
    doc.add_heading('Professional Summary', level=1)
    doc.add_paragraph(resume_data['summary'])

    # Add work experience
    doc.add_heading('Work Experience', level=1)
    for job in resume_data['work_experience']:
        p = doc.add_paragraph(f"{job['title']} at {job['company']}", style='Heading 2')
        p.add_run(f"\n{job['start_date']} - {job['end_date']}")
        for bullet in job['description'].split('\n'):
            if bullet.strip():
                doc.add_paragraph(bullet.strip(), style='List Bullet')

    # Add education
    doc.add_heading('Education', level=1)
    for edu in resume_data['education']:
        p = doc.add_paragraph(f"{edu['degree']} in {edu['field']}", style='Heading 2')
        p.add_run(f"\n{edu['institution']}, {edu['graduation_date']}")

    # Add skills
    doc.add_heading('Skills', level=1)
    doc.add_paragraph(', '.join(resume_data['skills']))

    buffer = BytesIO()
    doc.save(buffer)
    buffer.seek(0)
    return buffer

def create_pdf(resume_data):
    buffer = BytesIO()
    doc = SimpleDocTemplate(buffer, pagesize=letter, rightMargin=72, leftMargin=72, topMargin=72, bottomMargin=18)

    styles = getSampleStyleSheet()
    styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))
    styles.add(ParagraphStyle(name='Center', alignment=TA_CENTER))

    story = []

    # Add photo if provided
    if 'photo' in resume_data and resume_data['photo']:
        image_stream = BytesIO(resume_data['photo'])
        img = Image(image_stream, width=100, height=100)
        story.append(img)

    # Add name
    story.append(Paragraph(resume_data['name'], styles['Title']))

    # Add contact information
    story.append(Paragraph(f"{resume_data['email']} | {resume_data['phone']} | {resume_data['location']}", styles['Center']))
    story.append(Spacer(1, 12))

    # Add summary
    story.append(Paragraph('Professional Summary', styles['Heading1']))
    story.append(Paragraph(resume_data['summary'], styles['Justify']))
    story.append(Spacer(1, 12))

    # Add work experience
    story.append(Paragraph('Work Experience', styles['Heading1']))
    for job in resume_data['work_experience']:
        story.append(Paragraph(f"{job['title']} at {job['company']}", styles['Heading2']))
        story.append(Paragraph(f"{job['start_date']} - {job['end_date']}", styles['Normal']))
        for bullet in job['description'].split('\n'):
            if bullet.strip():
                story.append(Paragraph(f"• {bullet.strip()}", styles['Normal']))
        story.append(Spacer(1, 12))

    # Add education
    story.append(Paragraph('Education', styles['Heading1']))
    for edu in resume_data['education']:
        story.append(Paragraph(f"{edu['degree']} in {edu['field']}", styles['Heading2']))
        story.append(Paragraph(f"{edu['institution']}, {edu['graduation_date']}", styles['Normal']))
    story.append(Spacer(1, 12))

    # Add skills
    story.append(Paragraph('Skills', styles['Heading1']))
    story.append(Paragraph(', '.join(resume_data['skills']), styles['Normal']))

    doc.build(story)
    buffer.seek(0)
    return buffer

def create_txt(resume_data):
    txt_content = f"{resume_data['name']}\n"
    txt_content += f"{resume_data['email']} | {resume_data['phone']} | {resume_data['location']}\n\n"

    txt_content += "Professional Summary\n"
    txt_content += f"{resume_data['summary']}\n\n"

    txt_content += "Work Experience\n"
    for job in resume_data['work_experience']:
        txt_content += f"{job['title']} at {job['company']}\n"
        txt_content += f"{job['start_date']} - {job['end_date']}\n"
        for bullet in job['description'].split('\n'):
            if bullet.strip():
                txt_content += f"• {bullet.strip()}\n"
        txt_content += "\n"

    txt_content += "Education\n"
    for edu in resume_data['education']:
        txt_content += f"{edu['degree']} in {edu['field']}\n"
        txt_content += f"{edu['institution']}, {edu['graduation_date']}\n\n"

    txt_content += "Skills\n"
    txt_content += ', '.join(resume_data['skills'])

    return txt_content.encode()

def calculate_ats_score(resume_data):
    score = 0
    max_score = 100

    # Check for key sections
    if resume_data['name']: score += 5
    if resume_data['email']: score += 5
    if resume_data['phone']: score += 5
    if resume_data['location']: score += 5
    if resume_data['summary']: score += 10
    if resume_data['work_experience']: score += 20
    if resume_data['education']: score += 15
    if resume_data['skills']: score += 15

    # Check content quality
    if len(resume_data['summary'].split()) >= 50: score += 5
    if len(resume_data['work_experience']) >= 2: score += 5
    if len(resume_data['skills']) >= 5: score += 5

    # Check for keywords (a simplified stand-in; a real checker would match
    # against job-specific keywords)
    keywords = ['experience', 'skills', 'project', 'team', 'leadership', 'communication', 'achieved', 'improved', 'managed', 'developed']
    resume_text = ' '.join([str(value) for value in resume_data.values() if isinstance(value, str)])
    for keyword in keywords:
        if keyword in resume_text.lower():
            score += 1

    return min(score, max_score)

def main():
    st.set_page_config(page_title="AI-Enhanced Resume Builder", page_icon="📄", layout="wide")

    st.markdown("""
    <style>
    .big-font {
        font-size:30px !important;
        font-weight: bold;
    }
    .stButton>button {
        width: 100%;
    }
    </style>
    """, unsafe_allow_html=True)

    # Add sidebar
    st.sidebar.title("About This Project")
    st.sidebar.write("""
    Welcome to the AI-Enhanced Resume Builder!

    This project helps you create a professional, ATS-optimized resume with the power of AI. Here's what you can do:

    1. Input your personal information
    2. Add your work experience
    3. Include your education details
    4. List your skills
    5. Optionally upload a photo
    6. Generate AI-enhanced content
    7. Review and download your resume

    The AI will help improve your resume content and provide an ATS compatibility score.

    Get started by filling out the form and clicking 'Next' at each step!
    """)

    st.markdown('<p class="big-font">AI-Enhanced Resume Builder</p>', unsafe_allow_html=True)
    st.write("Create a professional, ATS-optimized resume with AI-powered content enhancement")

    # Initialize session state
    if 'step' not in st.session_state:
        st.session_state.step = 1

    if 'resume_data' not in st.session_state:
        st.session_state.resume_data = {
            'name': '', 'email': '', 'phone': '', 'location': '',
            'summary': '', 'work_experience': [], 'education': [], 'skills': [], 'photo': None
        }

    # Step 1: Personal Information
    if st.session_state.step == 1:
        st.subheader("Step 1: Personal Information")
        name = st.text_input("Full Name", st.session_state.resume_data['name'])
        email = st.text_input("Email", st.session_state.resume_data['email'])
        phone = st.text_input("Phone", st.session_state.resume_data['phone'])
        location = st.text_input("Location", st.session_state.resume_data['location'])

        photo_upload = st.file_uploader("Upload a photo (optional)", type=['jpg', 'jpeg', 'png'])
        if photo_upload:
            image = PILImage.open(photo_upload)
            st.image(image, caption='Uploaded Image', use_column_width=True)
            buffered = BytesIO()
            image.save(buffered, format="PNG")
            st.session_state.resume_data['photo'] = buffered.getvalue()

        if st.button("Next"):
            if name and email and phone and location:
                st.session_state.resume_data.update({
                    'name': name,
                    'email': email,
                    'phone': phone,
                    'location': location
                })
                st.session_state.step = 2
            else:
                st.error("Please fill in all required fields before proceeding.")

    # Step 2: Work Experience
    elif st.session_state.step == 2:
        st.subheader("Step 2: Work Experience")
        num_jobs = st.number_input("Number of jobs to add", min_value=1, max_value=10, value=len(st.session_state.resume_data['work_experience']) or 1)

        work_experience = []
        for i in range(num_jobs):
            st.write(f"Job {i+1}")
            # Pre-fill each field from session state when the entry already exists
            existing = st.session_state.resume_data['work_experience']
            job = {}
            job['title'] = st.text_input(f"Job Title {i+1}", existing[i]['title'] if i < len(existing) else '')
            job['company'] = st.text_input(f"Company {i+1}", existing[i]['company'] if i < len(existing) else '')
            job['start_date'] = st.date_input(f"Start Date {i+1}", value=datetime.strptime(existing[i]['start_date'] if i < len(existing) else '2020-01-01', '%Y-%m-%d')).strftime('%Y-%m-%d')
            job['end_date'] = st.date_input(f"End Date {i+1}", value=datetime.strptime(existing[i]['end_date'] if i < len(existing) else '2023-01-01', '%Y-%m-%d')).strftime('%Y-%m-%d')
            job['description'] = st.text_area(f"Job Description {i+1}", existing[i]['description'] if i < len(existing) else '', height=100)
            work_experience.append(job)

        col1, col2 = st.columns(2)
        if col1.button("Previous"):
            st.session_state.step = 1
        if col2.button("Next"):
            if all(job['title'] and job['company'] and job['description'] for job in work_experience):
                st.session_state.resume_data['work_experience'] = work_experience
                st.session_state.step = 3
            else:
                st.error("Please fill in all required fields for each job before proceeding.")

    # Step 3: Education
    elif st.session_state.step == 3:
        st.subheader("Step 3: Education")
        num_edu = st.number_input("Number of education entries", min_value=1, max_value=5, value=len(st.session_state.resume_data['education']) or 1)

        education = []
        for i in range(num_edu):
            st.write(f"Education {i+1}")
            # Pre-fill each field from session state when the entry already exists
            existing = st.session_state.resume_data['education']
            edu = {}
            edu['degree'] = st.text_input(f"Degree {i+1}", existing[i]['degree'] if i < len(existing) else '')
            edu['field'] = st.text_input(f"Field of Study {i+1}", existing[i]['field'] if i < len(existing) else '')
            edu['institution'] = st.text_input(f"Institution {i+1}", existing[i]['institution'] if i < len(existing) else '')
            edu['graduation_date'] = st.date_input(f"Graduation Date {i+1}", value=datetime.strptime(existing[i]['graduation_date'] if i < len(existing) else '2023-01-01', '%Y-%m-%d')).strftime('%Y-%m-%d')
            education.append(edu)

        col1, col2 = st.columns(2)
        if col1.button("Previous"):
            st.session_state.step = 2
        if col2.button("Next"):
            if all(edu['degree'] and edu['field'] and edu['institution'] for edu in education):
                st.session_state.resume_data['education'] = education
                st.session_state.step = 4
            else:
                st.error("Please fill in all required fields for each education entry before proceeding.")

    # Step 4: Skills and Generation
    elif st.session_state.step == 4:
        st.subheader("Step 4: Skills and Resume Generation")
        skills_input = st.text_input("Skills (comma-separated)", ', '.join(st.session_state.resume_data['skills']))

        if st.button("Generate Resume"):
            if skills_input.strip():
                st.session_state.resume_data['skills'] = [skill.strip() for skill in skills_input.split(',') if skill.strip()]
                with st.spinner("Generating AI-enhanced resume content..."):
                    st.session_state.resume_data = generate_resume_content(st.session_state.resume_data)
                st.session_state.step = 5
                st.experimental_rerun()
            else:
                st.error("Please enter at least one skill before generating the resume.")

    # Step 5: Review and Download
    elif st.session_state.step == 5:
        st.subheader("Generated Resume")

        # Display resume content for review
        st.write("### Personal Information")
        st.write(f"**Name:** {st.session_state.resume_data['name']}")
        st.write(f"**Email:** {st.session_state.resume_data['email']}")
        st.write(f"**Phone:** {st.session_state.resume_data['phone']}")
        st.write(f"**Location:** {st.session_state.resume_data['location']}")

        if st.session_state.resume_data['photo']:
            st.image(st.session_state.resume_data['photo'], caption='Your Photo', width=200)

        st.write("### Professional Summary")
        st.write(st.session_state.resume_data['summary'])

        st.write("### Work Experience")
        for job in st.session_state.resume_data['work_experience']:
            st.write(f"**{job['title']} at {job['company']}**")
            st.write(f"{job['start_date']} - {job['end_date']}")
            st.write(job['description'])

        st.write("### Education")
        for edu in st.session_state.resume_data['education']:
            st.write(f"**{edu['degree']} in {edu['field']}**")
            st.write(f"{edu['institution']}, {edu['graduation_date']}")

        st.write("### Skills")
        st.write(', '.join(st.session_state.resume_data['skills']))

        # Calculate and display ATS score
        ats_score = calculate_ats_score(st.session_state.resume_data)
        st.write(f"### ATS Compatibility Score: {ats_score}%")

        # Download options
        st.write("### Download Options")
        col1, col2, col3 = st.columns(3)

        docx_buffer = create_docx(st.session_state.resume_data)
        col1.download_button(
            label="Download as DOCX",
            data=docx_buffer,
            file_name="resume.docx",
            mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        )

        pdf_buffer = create_pdf(st.session_state.resume_data)
        col2.download_button(
            label="Download as PDF",
            data=pdf_buffer,
            file_name="resume.pdf",
            mime="application/pdf"
        )

        txt_content = create_txt(st.session_state.resume_data)
        col3.download_button(
            label="Download as TXT",
            data=txt_content,
            file_name="resume.txt",
            mime="text/plain"
        )

        if st.button("Edit Resume"):
            st.session_state.step = 1

        if st.button("Start Over"):
            st.session_state.step = 1
            st.session_state.resume_data = {
                'name': '', 'email': '', 'phone': '', 'location': '',
                'summary': '', 'work_experience': [], 'education': [], 'skills': [], 'photo': None
            }
            st.experimental_rerun()

if __name__ == "__main__":
    main()
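To see how the ATS score is assembled, here is calculate_ats_score on a minimal hand-written profile (a sketch; all field values are placeholders, not real data):

data = {
    'name': 'Jane Doe', 'email': 'jane@example.com', 'phone': '555-0100',
    'location': 'Remote',
    'summary': 'Experienced engineer who improved and managed projects.',
    'work_experience': [{'title': 'Engineer', 'company': 'Acme',
                         'start_date': '2020-01-01', 'end_date': '2023-01-01',
                         'description': 'Developed features'}],
    'education': [{'degree': 'BSc', 'field': 'CS', 'institution': 'State U',
                   'graduation_date': '2019-06-01'}],
    'skills': ['python'], 'photo': None,
}
# Section checks: 4 x 5 (contact) + 10 (summary) + 20 (experience)
# + 15 (education) + 15 (skills) = 80; the content-quality bonuses don't
# apply to this short profile, and each keyword found in the top-level
# string fields adds 1 more, capped at 100.
print(calculate_ats_score(data))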