# CareerCraft — Streamlit job-search assistant (cold emails, cover letters, resume analysis,
# job recommendations, application tracking, interview prep, learning paths).
# --- Module setup: imports, secrets, and the shared LLM client ---
import streamlit as st
import requests
from langchain_groq import ChatGroq
from streamlit_chat import message
import plotly.express as px
import pandas as pd
import sqlite3
from datetime import datetime, timedelta
import re
import os
import fitz  # PyMuPDF
from bs4 import BeautifulSoup
from streamlit_option_menu import option_menu

# Secrets and API Keys.
# Required keys raise KeyError at startup if missing; optional ones default to "".
GROQ_API_KEY = st.secrets["GROQ_API_KEY"]
RAPIDAPI_KEY = st.secrets["RAPIDAPI_KEY"]
YOUTUBE_API_KEY = st.secrets["YOUTUBE_API_KEY"]
THE_MUSE_API_KEY = st.secrets.get("THE_MUSE_API_KEY", "")
BLS_API_KEY = st.secrets.get("BLS_API_KEY", "")

# Shared Groq chat model used by every generation helper below.
llm = ChatGroq(
    temperature=0,  # deterministic output for reproducible generations
    groq_api_key=GROQ_API_KEY,
    model_name="llama-3.1-70b-versatile",
)
# -------------------------------
# PDF and HTML Extraction Functions
# -------------------------------
def extract_text_from_pdf(pdf_file):
    """
    Extract text from an uploaded PDF file.

    Args:
        pdf_file: A binary file-like object (e.g. a Streamlit UploadedFile).

    Returns:
        The concatenated text of all pages, or "" on failure
        (the error is surfaced to the user via st.error).
    """
    text = ""
    try:
        # PyMuPDF can open from an in-memory stream; filetype must be explicit.
        with fitz.open(stream=pdf_file.read(), filetype="pdf") as doc:
            for page in doc:
                text += page.get_text()
        return text
    except Exception as e:
        st.error(f"Error extracting text from PDF: {e}")
        return ""
def extract_job_description(job_link):
    """
    Fetch a job posting URL and return its visible text.

    Args:
        job_link: URL of the job posting.

    Returns:
        The page text (newline-separated, stripped), or "" on any
        network/parse failure (error shown via st.error).
    """
    try:
        # Some job boards reject the default python-requests UA; spoof a browser.
        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
        response = requests.get(job_link, headers=headers)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        job_description = soup.get_text(separator='\n')
        return job_description.strip()
    except Exception as e:
        st.error(f"Error fetching job description: {e}")
        return ""
def extract_requirements(job_description):
    """
    Use the Groq LLM to extract requirements from a job description.

    Returns:
        A numbered-list string of requirements/qualifications/skills,
        or "" on failure (error shown via st.error).
    """
    prompt = f"""
    The following is a job description:

    {job_description}

    Extract the list of job requirements, qualifications, and skills from the job description. Provide them as a numbered list.

    Requirements:
    """
    try:
        response = llm.invoke(prompt)
        return response.content.strip()
    except Exception as e:
        st.error(f"Error extracting requirements: {e}")
        return ""
# -------------------------------
# Email and Cover Letter Generation
# -------------------------------
def generate_email(job_description, requirements, resume_text):
    """
    Generate a personalized cold email with the Groq LLM.

    Args:
        job_description: Full text of the job posting.
        requirements: Extracted requirements (see extract_requirements).
        resume_text: Plain text of the candidate's resume.

    Returns:
        The email text, or "" on failure (error shown via st.error).
    """
    prompt = f"""
    You are Adithya S Nair, a recent Computer Science graduate specializing in Artificial Intelligence and Machine Learning. Craft a concise and professional cold email to a potential employer based on the following information:

    **Job Description:**
    {job_description}

    **Extracted Requirements:**
    {requirements}

    **Your Resume:**
    {resume_text}

    **Email Requirements:**
    - Introduction: Briefly introduce yourself and mention the specific job you are applying for.
    - Body: Highlight your relevant skills, projects, internships, and leadership experiences.
    - Value Proposition: Explain how your fresh perspective can add value to the company.
    - Closing: Express enthusiasm and request an interview.
    """
    try:
        response = llm.invoke(prompt)
        return response.content.strip()
    except Exception as e:
        st.error(f"Error generating email: {e}")
        return ""
def generate_cover_letter(job_description, requirements, resume_text):
    """
    Generate a personalized cover letter with the Groq LLM.

    Args:
        job_description: Full text of the job posting.
        requirements: Extracted requirements (see extract_requirements).
        resume_text: Plain text of the candidate's resume.

    Returns:
        The cover letter text, or "" on failure (error shown via st.error).
    """
    prompt = f"""
    You are Adithya S Nair, a recent Computer Science graduate specializing in Artificial Intelligence and Machine Learning. Compose a professional cover letter based on the following information:

    **Job Description:**
    {job_description}

    **Extracted Requirements:**
    {requirements}

    **Your Resume:**
    {resume_text}

    **Cover Letter Requirements:**
    1. Greeting: Address the hiring manager.
    2. Introduction: Mention the position and your enthusiasm.
    3. Body: Highlight skills, experiences, and relevant projects.
    4. Value Proposition: Explain how you can contribute to the company.
    5. Conclusion: Express interest in an interview and thank the reader.
    """
    try:
        response = llm.invoke(prompt)
        return response.content.strip()
    except Exception as e:
        st.error(f"Error generating cover letter: {e}")
        return ""
# -------------------------------
# Resume Analysis Functions
# -------------------------------
def extract_skills(text):
    """
    Extract a list of skills from resume text using the Groq LLM.

    Returns:
        A list of skill strings (empty on failure).
    """
    prompt = f"""
    Extract a comprehensive list of technical and soft skills from the following resume text. Provide the skills as a comma-separated list.

    Resume Text:
    {text}

    Skills:
    """
    try:
        response = llm.invoke(prompt)
        skills = response.content.strip()
        # The model may separate skills with commas or newlines; accept both.
        skills_list = [skill.strip() for skill in re.split(r',|\n', skills) if skill.strip()]
        return skills_list
    except Exception as e:
        st.error(f"Error extracting skills: {e}")
        return []
def suggest_keywords(resume_text, job_description=None):
    """
    Suggest additional ATS-relevant keywords for a resume.

    Args:
        resume_text: Plain text of the resume.
        job_description: Optional job description used to tailor keywords.

    Returns:
        A list of keyword strings (empty on failure).
    """
    prompt = f"""
    Analyze the following resume text and suggest additional relevant keywords that can enhance its compatibility with Applicant Tracking Systems (ATS). If a job description is provided, tailor the keywords accordingly.

    Resume Text:
    {resume_text}

    Job Description:
    {job_description if job_description else "N/A"}

    Suggested Keywords:
    """
    try:
        response = llm.invoke(prompt)
        keywords = response.content.strip()
        # Accept comma- or newline-separated model output.
        keywords_list = [keyword.strip() for keyword in re.split(r',|\n', keywords) if keyword.strip()]
        return keywords_list
    except Exception as e:
        st.error(f"Error suggesting keywords: {e}")
        return []
def create_skill_distribution_chart(skills):
    """
    Build a Plotly bar chart of skill frequencies.

    Args:
        skills: List of skill strings (duplicates are counted).

    Returns:
        A plotly Figure titled 'Skill Distribution'.
    """
    skill_counts = {}
    for skill in skills:
        skill_counts[skill] = skill_counts.get(skill, 0) + 1
    df = pd.DataFrame(list(skill_counts.items()), columns=['Skill', 'Count'])
    fig = px.bar(df, x='Skill', y='Count', title='Skill Distribution')
    return fig
def create_experience_timeline(resume_text):
    """
    Build an experience timeline chart from resume text.

    The LLM is asked for a pipe-delimited table of (Job Title, Company,
    Duration); durations are converted to years and stacked cumulatively.

    Returns:
        A plotly Figure, or None when no rows could be parsed or on error.
    """
    prompt = f"""
    From the following resume text, extract the job titles, companies, and durations of employment. Provide the information in a table format with columns: Job Title, Company, Duration (in years).

    Resume Text:
    {resume_text}

    Table:
    """
    try:
        response = llm.invoke(prompt)
        table_text = response.content.strip()
        data = []
        for line in table_text.split('\n'):
            # Skip blanks and the header row the model usually emits.
            if line.strip() and not line.lower().startswith("job title"):
                parts = line.split('|')
                if len(parts) == 3:
                    job_title = parts[0].strip()
                    company = parts[1].strip()
                    duration = parts[2].strip()
                    duration_years = parse_duration(duration)
                    data.append({"Job Title": job_title, "Company": company, "Duration (years)": duration_years})
        df = pd.DataFrame(data)
        if not df.empty:
            # Derive synthetic start/end offsets by stacking durations.
            df['Start Year'] = df['Duration (years)'].cumsum() - df['Duration (years)']
            df['End Year'] = df['Duration (years)'].cumsum()
            # NOTE(review): px.timeline normally expects datetime-like x values;
            # numeric offsets are passed here — confirm rendering is acceptable.
            fig = px.timeline(df, x_start="Start Year", x_end="End Year", y="Job Title", color="Company", title="Experience Timeline")
            fig.update_yaxes(categoryorder="total ascending")
            return fig
        else:
            return None
    except Exception as e:
        st.error(f"Error creating experience timeline: {e}")
        return None
def parse_duration(duration_str):
    """
    Parse duration strings like '2 years' or '6 months' into float years.

    Args:
        duration_str: Free-form duration text.

    Returns:
        Duration in years; 0 when no number is present or the unit is
        unrecognized.
    """
    try:
        lowered = duration_str.lower()
        if 'year' in lowered:
            return float(re.findall(r'\d+\.?\d*', duration_str)[0])
        elif 'month' in lowered:
            return float(re.findall(r'\d+\.?\d*', duration_str)[0]) / 12
        else:
            return 0
    except (ValueError, IndexError):
        # No numeric token found (e.g. "several years") -> unknown duration.
        return 0
# -------------------------------
# Job API Integration Functions
# -------------------------------
def fetch_remotive_jobs_api(job_title, location=None, category=None, remote=True, max_results=50):
    """
    Fetch job listings from the Remotive public API.

    Args:
        job_title: Search term.
        location: Unused by the Remotive endpoint; kept for a uniform signature.
        category: Optional Remotive category filter.
        remote: When True, keep only worldwide/remote-flagged jobs.
        max_results: Maximum listings requested.

    Returns:
        A list of job dicts (empty on request failure).
    """
    base_url = "https://remotive.com/api/remote-jobs"
    params = {"search": job_title, "limit": max_results}
    if category:
        params["category"] = category
    try:
        response = requests.get(base_url, params=params)
        response.raise_for_status()
        jobs = response.json().get("jobs", [])
        if remote:
            jobs = [job for job in jobs if job.get("candidate_required_location") == "Worldwide" or job.get("remote") == True]
        return jobs
    except requests.exceptions.RequestException as e:
        st.error(f"Error fetching jobs from Remotive: {e}")
        return []
def fetch_muse_jobs_api(job_title, location=None, category=None, max_results=50):
    """
    Fetch job listings from The Muse public API.

    The API has no free-text title search, so results are filtered locally
    by substring match on the job name.

    Returns:
        A list of job dicts whose name contains job_title (empty on failure).
    """
    base_url = "https://www.themuse.com/api/public/jobs"
    headers = {"Content-Type": "application/json"}
    params = {"page": 1, "per_page": max_results, "category": category, "location": location, "company": None}
    try:
        response = requests.get(base_url, params=params, headers=headers)
        response.raise_for_status()
        jobs = response.json().get("results", [])
        filtered_jobs = [job for job in jobs if job_title.lower() in job.get("name", "").lower()]
        return filtered_jobs
    except requests.exceptions.RequestException as e:
        st.error(f"Error fetching jobs from The Muse: {e}")
        return []
def fetch_indeed_jobs_list_api(job_title, location="United States", distance="1.0", language="en_GB", remoteOnly="false", datePosted="month", employmentTypes="fulltime;parttime;intern;contractor", index=0, page_size=10, category=None):
    """
    Fetch a list of job IDs from the jobs-api14 (RapidAPI) endpoint.

    Args:
        job_title: Search query.
        location, distance, language, remoteOnly, datePosted,
        employmentTypes, index, page_size: Passed straight to the API.
        category: Accepted for call-site compatibility (recommend_indeed_jobs
            passes it) but not supported by this endpoint, so it is ignored.

    Returns:
        A list of job ID strings (empty on request failure).
    """
    url = "https://jobs-api14.p.rapidapi.com/list"
    querystring = {
        "query": job_title,
        "location": location,
        "distance": distance,
        "language": language,
        "remoteOnly": remoteOnly,
        "datePosted": datePosted,
        "employmentTypes": employmentTypes,
        "index": str(index),
        "page_size": str(page_size)
    }
    headers = {"x-rapidapi-key": RAPIDAPI_KEY, "x-rapidapi-host": "jobs-api14.p.rapidapi.com"}
    try:
        response = requests.get(url, headers=headers, params=querystring)
        response.raise_for_status()
        data = response.json()
        job_ids = [job["id"] for job in data.get("jobs", [])]
        return job_ids
    except requests.exceptions.RequestException as e:
        st.error(f"Error fetching job IDs from Indeed: {e}")
        return []
def fetch_indeed_job_details_api(job_id, language="en_GB"):
    """
    Fetch full job details for one job ID from jobs-api14 (RapidAPI).

    Returns:
        The parsed JSON dict, or {} on request failure.
    """
    url = "https://jobs-api14.p.rapidapi.com/get"
    querystring = {"id": job_id, "language": language}
    headers = {"x-rapidapi-key": RAPIDAPI_KEY, "x-rapidapi-host": "jobs-api14.p.rapidapi.com"}
    try:
        response = requests.get(url, headers=headers, params=querystring)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        st.error(f"Error fetching job details from Indeed: {e}")
        return {}
def recommend_indeed_jobs(user_skills, user_preferences):
    """
    Recommend Indeed jobs ranked by how many user skills appear in each
    job description.

    Args:
        user_skills: List of skill strings to match (case-insensitive).
        user_preferences: Dict with optional "job_title" and "location".

    Returns:
        Up to 10 job-detail dicts, best match first ([] when the monthly
        RapidAPI quota is exhausted or nothing matches).
    """
    job_title = user_preferences.get("job_title", "")
    location = user_preferences.get("location", "United States")
    language = "en_GB"
    # Fix: the original passed category= here, but fetch_indeed_jobs_list_api
    # has no such parameter, which raised TypeError at runtime.
    job_ids = fetch_indeed_jobs_list_api(job_title, location=location, page_size=5)
    recommended_jobs = []
    api_calls_needed = len(job_ids)
    if not can_make_api_calls(api_calls_needed):
        st.error("❌ You have reached your monthly API request limit. Please try again later.")
        return []
    for job_id in job_ids:
        job_details = fetch_indeed_job_details_api(job_id, language=language)
        if job_details and not job_details.get("hasError", True):
            job_description = job_details.get("description", "").lower()
            match_score = sum(skill.lower() in job_description for skill in user_skills)
            if match_score > 0:
                recommended_jobs.append((match_score, job_details))
        # One quota unit per detail request, charged whether or not it matched.
        decrement_api_calls(1)
    recommended_jobs.sort(reverse=True, key=lambda x: x[0])
    return [job for score, job in recommended_jobs[:10]]
def recommend_jobs(user_skills, user_preferences):
    """
    Combine recommendations from Remotive, The Muse, and Indeed,
    de-duplicated by apply-URL (first occurrence wins).

    Returns:
        A list of unique job dicts from the three providers.
    """
    remotive_jobs = fetch_remotive_jobs_api(user_preferences.get("job_title", ""), user_preferences.get("location"), user_preferences.get("category"))
    muse_jobs = fetch_muse_jobs_api(user_preferences.get("job_title", ""), user_preferences.get("location"), user_preferences.get("category"))
    indeed_jobs = recommend_indeed_jobs(user_skills, user_preferences)
    combined_jobs = remotive_jobs + muse_jobs + indeed_jobs
    unique_jobs = {}
    for job in combined_jobs:
        # Each provider names its URL field differently.
        url = job.get("url") or job.get("redirect_url") or job.get("url_standard")
        if url and url not in unique_jobs:
            unique_jobs[url] = job
    return list(unique_jobs.values())
# -------------------------------
# API Usage Counter Functions
# -------------------------------
def init_api_usage_db():
    """
    Create the api_usage table if needed and seed it with a 25-call quota.

    Idempotent: the seed row is only inserted when the table is empty.
    """
    conn = sqlite3.connect('applications.db')
    c = conn.cursor()
    c.execute('''
        CREATE TABLE IF NOT EXISTS api_usage (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            count INTEGER,
            last_reset DATE
        )
    ''')
    c.execute('SELECT COUNT(*) FROM api_usage')
    if c.fetchone()[0] == 0:
        # Store the date as an ISO string explicitly rather than relying on
        # sqlite3's (deprecated) date adapter; the stored value is identical.
        c.execute('INSERT INTO api_usage (count, last_reset) VALUES (?, ?)',
                  (25, datetime.now().date().isoformat()))
    conn.commit()
    conn.close()
def get_api_usage():
    """
    Return the current API quota state.

    Returns:
        (count, last_reset_date) from row id=1; falls back to
        (25, today) when the row is missing.
    """
    conn = sqlite3.connect('applications.db')
    c = conn.cursor()
    c.execute('SELECT count, last_reset FROM api_usage WHERE id = 1')
    row = c.fetchone()
    conn.close()
    if row:
        return row[0], datetime.strptime(row[1], "%Y-%m-%d").date()
    else:
        return 25, datetime.now().date()
def reset_api_usage():
    """
    Reset the quota row (id=1) to 25 calls with today's date.
    """
    conn = sqlite3.connect('applications.db')
    c = conn.cursor()
    # ISO string avoids sqlite3's deprecated date adapter; same stored value.
    c.execute('UPDATE api_usage SET count = ?, last_reset = ? WHERE id = 1',
              (25, datetime.now().date().isoformat()))
    conn.commit()
    conn.close()
def can_make_api_calls(requests_needed):
    """
    Check whether the remaining quota covers requests_needed calls.

    The counter auto-resets to 25 once 30 days have passed since the
    last reset.

    Returns:
        True when enough quota remains.
    """
    count, last_reset = get_api_usage()
    today = datetime.now().date()
    if today >= last_reset + timedelta(days=30):
        reset_api_usage()
        count, last_reset = get_api_usage()
    return count >= requests_needed
def decrement_api_calls(requests_used):
    """
    Subtract requests_used from the quota row (id=1), clamping at 0.

    Silently does nothing when the row does not exist.
    """
    conn = sqlite3.connect('applications.db')
    c = conn.cursor()
    c.execute('SELECT count FROM api_usage WHERE id = 1')
    row = c.fetchone()
    if row:
        new_count = max(row[0] - requests_used, 0)
        c.execute('UPDATE api_usage SET count = ? WHERE id = 1', (new_count,))
        conn.commit()
    conn.close()
# -------------------------------
# Application Tracking Functions
# -------------------------------
def init_db():
    """
    Create the applications table in applications.db if it does not exist.
    """
    conn = sqlite3.connect('applications.db')
    c = conn.cursor()
    c.execute('''
        CREATE TABLE IF NOT EXISTS applications (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            job_title TEXT,
            company TEXT,
            application_date TEXT,
            status TEXT,
            deadline TEXT,
            notes TEXT,
            job_description TEXT,
            resume_text TEXT,
            skills TEXT
        )
    ''')
    conn.commit()
    conn.close()
def add_application(job_title, company, application_date, status, deadline, notes, job_description, resume_text, skills):
    """
    Insert one job application row.

    Args:
        skills: A list of skill strings; stored as a ", "-joined string.
        (All other arguments are stored as given.)
    """
    conn = sqlite3.connect('applications.db')
    c = conn.cursor()
    c.execute('''
        INSERT INTO applications (job_title, company, application_date, status, deadline, notes, job_description, resume_text, skills)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
    ''', (job_title, company, application_date, status, deadline, notes, job_description, resume_text, ', '.join(skills)))
    conn.commit()
    conn.close()
def fetch_applications():
    """
    Fetch all application rows as dictionaries.

    Returns:
        A list of dicts keyed by display names ("Job Title", "Company", ...);
        "Skills" is split back into a list.
    """
    conn = sqlite3.connect('applications.db')
    c = conn.cursor()
    c.execute('SELECT * FROM applications')
    data = c.fetchall()
    conn.close()
    applications = []
    for app in data:
        applications.append({
            "ID": app[0],
            "Job Title": app[1],
            "Company": app[2],
            "Application Date": app[3],
            "Status": app[4],
            "Deadline": app[5],
            "Notes": app[6],
            "Job Description": app[7],
            "Resume Text": app[8],
            "Skills": app[9].split(', ') if app[9] else []
        })
    return applications
def update_application_status(app_id, new_status):
    """
    Set the status of the application with the given id.
    """
    conn = sqlite3.connect('applications.db')
    c = conn.cursor()
    c.execute('UPDATE applications SET status = ? WHERE id = ?', (new_status, app_id))
    conn.commit()
    conn.close()
def delete_application(app_id):
    """
    Delete the application with the given id (no-op when absent).
    """
    conn = sqlite3.connect('applications.db')
    c = conn.cursor()
    c.execute('DELETE FROM applications WHERE id = ?', (app_id,))
    conn.commit()
    conn.close()
# -------------------------------
# Learning Path Generation
# -------------------------------
def generate_learning_path(career_goal, current_skills):
    """
    Generate a personalized learning path with the Groq LLM.

    Args:
        career_goal: Target role, e.g. "Data Scientist".
        current_skills: Comma-separated skills string.

    Returns:
        The learning-path text, or "" on failure (error shown via st.error).
    """
    prompt = f"""
    Based on the following career goal and current skills, create a personalized learning path that includes recommended courses, projects, and milestones.

    **Career Goal:**
    {career_goal}

    **Current Skills:**
    {current_skills}

    **Learning Path:**
    """
    try:
        response = llm.invoke(prompt)
        return response.content.strip()
    except Exception as e:
        st.error(f"Error generating learning path: {e}")
        return ""
# -------------------------------
# YouTube Video Search and Embed Functions
# -------------------------------
def search_youtube_videos(query, max_results=2, video_duration="long"):
    """
    Search the YouTube Data API v3 for videos matching the query.

    Args:
        query: Search terms.
        max_results: Number of results to request.
        video_duration: YouTube duration filter ("short"/"medium"/"long").

    Returns:
        A list of watch-page URLs (empty on request failure).
    """
    search_url = "https://www.googleapis.com/youtube/v3/search"
    params = {
        "part": "snippet",
        "q": query,
        "type": "video",
        "maxResults": max_results,
        "videoDuration": video_duration,
        "key": YOUTUBE_API_KEY
    }
    try:
        response = requests.get(search_url, params=params)
        response.raise_for_status()
        results = response.json().get("items", [])
        video_urls = [f"https://www.youtube.com/watch?v={item['id']['videoId']}" for item in results]
        return video_urls
    except requests.exceptions.RequestException as e:
        st.error(f"❌ Error fetching YouTube videos: {e}")
        return []
def embed_youtube_videos(video_urls, module_name):
    """
    Embed a list of YouTube videos in the Streamlit page.

    Args:
        video_urls: Watch-page URLs to embed.
        module_name: Currently unused; kept for caller compatibility.
    """
    for url in video_urls:
        st.video(url)
# -------------------------------
# Application Modules (Pages)
# -------------------------------
def email_generator_page():
    """Streamlit page: generate a cold email from a job link and an uploaded resume."""
    st.header("📧 Automated Email Generator")
    st.write("Generate personalized cold emails based on job postings and your resume.")
    col1, col2 = st.columns(2)
    with col1:
        job_link = st.text_input("🔗 Enter the job link:")
    with col2:
        uploaded_file = st.file_uploader("📄 Upload your resume (PDF):", type="pdf")
    if st.button("Generate Email"):
        # Validate inputs before doing any network/LLM work.
        if not job_link:
            st.error("Please enter a job link.")
            return
        if not uploaded_file:
            st.error("Please upload your resume.")
            return
        with st.spinner("Processing..."):
            job_description = extract_job_description(job_link)
            if not job_description:
                st.error("Failed to extract job description.")
                return
            requirements = extract_requirements(job_description)
            if not requirements:
                st.error("Failed to extract requirements.")
                return
            resume_text = extract_text_from_pdf(uploaded_file)
            if not resume_text:
                st.error("Failed to extract text from resume.")
                return
            email_text = generate_email(job_description, requirements, resume_text)
            if email_text:
                st.subheader("📨 Generated Email:")
                st.write(email_text)
                st.download_button("Download Email", data=email_text, file_name="generated_email.txt", mime="text/plain")
            else:
                st.error("Failed to generate email.")
def cover_letter_generator_page():
    """Streamlit page: generate a cover letter from a job link and an uploaded resume."""
    st.header("📝 Automated Cover Letter Generator")
    st.write("Generate personalized cover letters based on job postings and your resume.")
    col1, col2 = st.columns(2)
    with col1:
        job_link = st.text_input("🔗 Enter the job link:")
    with col2:
        uploaded_file = st.file_uploader("📄 Upload your resume (PDF):", type="pdf")
    if st.button("Generate Cover Letter"):
        # Validate inputs before doing any network/LLM work.
        if not job_link:
            st.error("Please enter a job link.")
            return
        if not uploaded_file:
            st.error("Please upload your resume.")
            return
        with st.spinner("Processing..."):
            job_description = extract_job_description(job_link)
            if not job_description:
                st.error("Failed to extract job description.")
                return
            requirements = extract_requirements(job_description)
            if not requirements:
                st.error("Failed to extract requirements.")
                return
            resume_text = extract_text_from_pdf(uploaded_file)
            if not resume_text:
                st.error("Failed to extract text from resume.")
                return
            cover_letter = generate_cover_letter(job_description, requirements, resume_text)
            if cover_letter:
                st.subheader("📝 Generated Cover Letter:")
                st.write(cover_letter)
                st.download_button("Download Cover Letter", data=cover_letter, file_name="generated_cover_letter.txt", mime="text/plain")
            else:
                st.error("Failed to generate cover letter.")
def resume_analysis_page():
    """Streamlit page: extract skills/keywords from a resume and show analytics."""
    st.header("📄 Resume Analysis and Optimization")
    st.write("Enhance your resume by extracting key information, suggestions, and visual analytics.")
    uploaded_file = st.file_uploader("📂 Upload your resume (PDF):", type="pdf")
    if uploaded_file:
        resume_text = extract_text_from_pdf(uploaded_file)
        if resume_text:
            st.success("✅ Resume uploaded successfully!")
            st.subheader("🔍 Extracted Information")
            tabs = st.tabs(["💼 Skills", "🔑 Suggested Keywords"])
            with tabs[0]:
                skills = extract_skills(resume_text)
                if skills:
                    st.markdown("**Identified Skills:**")
                    cols = st.columns(4)
                    for idx, skill in enumerate(skills, 1):
                        cols[idx % 4].write(f"- {skill}")
                else:
                    st.info("No skills extracted.")
            with tabs[1]:
                keywords = suggest_keywords(resume_text)
                if keywords:
                    st.markdown("**Suggested Keywords for ATS Optimization:**")
                    cols = st.columns(4)
                    for idx, keyword in enumerate(keywords, 1):
                        cols[idx % 4].write(f"- {keyword}")
                else:
                    st.info("No keywords suggested.")
            st.subheader("🛠️ Optimization Suggestions")
            st.markdown("""
            - **Keyword Optimization:** Incorporate suggested keywords.
            - **Highlight Relevant Sections:** Emphasize skills that match job requirements.
            - **Consistent Formatting:** Ensure readability and structure.
            """)
            st.subheader("📊 Visual Resume Analytics")
            viz_col1, viz_col2 = st.columns(2)
            with viz_col1:
                if skills:
                    st.markdown("**Skill Distribution:**")
                    fig_skills = create_skill_distribution_chart(skills)
                    st.plotly_chart(fig_skills, use_container_width=True)
                else:
                    st.info("No skills to display.")
            with viz_col2:
                fig_experience = create_experience_timeline(resume_text)
                if fig_experience:
                    st.markdown("**Experience Timeline:**")
                    st.plotly_chart(fig_experience, use_container_width=True)
                else:
                    st.info("Not enough data to generate an experience timeline.")
            st.subheader("💾 Save Resume Analysis")
            if st.button("Save Resume Analysis"):
                # Stored as a pseudo-application so it appears in the tracker DB.
                add_application(
                    job_title="N/A",
                    company="N/A",
                    application_date=datetime.now().strftime("%Y-%m-%d"),
                    status="N/A",
                    deadline="N/A",
                    notes="Resume Analysis",
                    job_description="N/A",
                    resume_text=resume_text,
                    skills=skills
                )
                st.success("✅ Resume analysis saved successfully!")
        else:
            st.error("❌ Failed to extract text from resume.")
def application_tracking_dashboard():
    """Streamlit page: add, list, import/export, update, and delete job applications."""
    st.header("📋 Application Tracking Dashboard")
    init_db()
    init_api_usage_db()
    st.subheader("➕ Add New Application")
    with st.form("add_application"):
        job_title = st.text_input("🖇️ Job Title")
        company = st.text_input("🏢 Company")
        application_date = st.date_input("📅 Application Date", datetime.today())
        status = st.selectbox("📈 Status", ["Applied", "Interviewing", "Offered", "Rejected"])
        deadline = st.date_input("⏰ Application Deadline", datetime.today() + timedelta(days=30))
        notes = st.text_area("📝 Notes")
        uploaded_file = st.file_uploader("📂 Upload Job Description (PDF)", type="pdf")
        uploaded_resume = st.file_uploader("📄 Upload Resume (PDF)", type="pdf")
        submitted = st.form_submit_button("➕ Add Application")
    if submitted:
        job_description = extract_text_from_pdf(uploaded_file) if uploaded_file else ""
        if uploaded_resume:
            resume_text = extract_text_from_pdf(uploaded_resume)
            skills = extract_skills(resume_text)
        else:
            resume_text = ""
            skills = []
        add_application(
            job_title=job_title,
            company=company,
            application_date=application_date.strftime("%Y-%m-%d"),
            status=status,
            deadline=deadline.strftime("%Y-%m-%d"),
            notes=notes,
            job_description=job_description,
            resume_text=resume_text,
            skills=skills
        )
        st.success("✅ Application added successfully!")
    st.subheader("📊 Your Applications")
    applications = fetch_applications()
    if applications:
        df = pd.DataFrame(applications)
        # Hide the bulky text columns in the table view.
        df = df.drop(columns=["Job Description", "Resume Text", "Skills"])
        st.dataframe(df)
        csv = df.to_csv(index=False).encode('utf-8')
        st.download_button("💾 Download Applications as CSV", data=csv, file_name='applications.csv', mime='text/csv')
        st.subheader("📥 Import Applications")
        uploaded_csv = st.file_uploader("📁 Upload a CSV file", type="csv")
        if uploaded_csv:
            try:
                imported_df = pd.read_csv(uploaded_csv)
                required_columns = {"Job Title", "Company", "Application Date", "Status", "Deadline", "Notes"}
                if not required_columns.issubset(imported_df.columns):
                    st.error("❌ Uploaded CSV is missing required columns.")
                else:
                    for _, row in imported_df.iterrows():
                        add_application(
                            job_title=row.get("Job Title", "N/A"),
                            company=row.get("Company", "N/A"),
                            application_date=row.get("Application Date", datetime.now().strftime("%Y-%m-%d")),
                            status=row.get("Status", "Applied"),
                            deadline=row.get("Deadline", ""),
                            notes=row.get("Notes", ""),
                            job_description=row.get("Job Description", ""),
                            resume_text=row.get("Resume Text", ""),
                            skills=row.get("Skills", "").split(', ') if row.get("Skills") else []
                        )
                    st.success("✅ Applications imported successfully!")
            except Exception as e:
                st.error(f"❌ Error importing applications: {e}")
        for app in applications:
            with st.expander(f"{app['Job Title']} at {app['Company']}"):
                st.write(f"**📅 Application Date:** {app['Application Date']}")
                st.write(f"**⏰ Deadline:** {app['Deadline']}")
                st.write(f"**📈 Status:** {app['Status']}")
                st.write(f"**📝 Notes:** {app['Notes']}")
                new_status = st.selectbox("🔄 Update Status:", ["Applied", "Interviewing", "Offered", "Rejected"], key=f"status_{app['ID']}")
                if st.button("🔁 Update Status", key=f"update_{app['ID']}"):
                    update_application_status(app['ID'], new_status)
                    st.success("✅ Status updated successfully!")
                if st.button("🗑️ Delete Application", key=f"delete_{app['ID']}"):
                    delete_application(app['ID'])
                    st.success("✅ Application deleted successfully!")
    else:
        st.write("ℹ️ No applications found.")
def job_recommendations_module():
    """Streamlit page: collect preferences/skills and show matching jobs."""
    st.header("🔍 Job Matching & Recommendations")
    st.write("Discover job opportunities tailored to your skills and preferences.")
    st.subheader("🎯 Set Your Preferences")
    with st.form("preferences_form"):
        job_title = st.text_input("🔍 Desired Job Title", placeholder="e.g., Data Scientist")
        location = st.text_input("📍 Preferred Location", placeholder="e.g., New York, USA or Remote")
        category = st.selectbox("📂 Job Category", ["", "Engineering", "Marketing", "Design", "Sales", "Finance", "Healthcare", "Education", "Other"])
        user_skills_input = st.text_input("💡 Your Skills (comma-separated)", placeholder="e.g., Python, Machine Learning, SQL")
        submitted = st.form_submit_button("🚀 Get Recommendations")
    if submitted:
        if not job_title or not user_skills_input:
            st.error("❌ Please enter both job title and your skills.")
            return
        user_skills = [skill.strip() for skill in user_skills_input.split(",") if skill.strip()]
        user_preferences = {"job_title": job_title, "location": location, "category": category}
        with st.spinner("🔄 Fetching job recommendations..."):
            recommended_jobs = recommend_jobs(user_skills, user_preferences)
        if recommended_jobs:
            st.subheader("💼 Recommended Jobs:")
            for idx, job in enumerate(recommended_jobs, 1):
                # Field names differ per provider (Remotive / Muse / Indeed).
                job_title_display = job.get("title") or job.get("name") or job.get("jobTitle")
                company_display = job.get("company", {}).get("name") or job.get("company_name") or job.get("employer", {}).get("name")
                location_display = job.get("candidate_required_location") or job.get("location") or job.get("country")
                job_url = job.get("url") or job.get("redirect_url") or job.get("url_standard")
                st.markdown(f"### {idx}. {job_title_display}")
                st.markdown(f"**🏢 Company:** {company_display}")
                st.markdown(f"**📍 Location:** {location_display}")
                st.markdown(f"**🔗 Job URL:** [Apply Here]({job_url})")
                st.write("---")
        else:
            st.info("ℹ️ No job recommendations found based on your criteria.")
def interview_preparation_module():
    """Streamlit page: generate mock interview Q&A for a role at a company."""
    st.header("🎤 Interview Preparation")
    st.write("Prepare for your interviews with tailored mock questions and answers.")
    col1, col2 = st.columns(2)
    with col1:
        job_title = st.text_input("🔍 Enter the job title you're applying for:")
    with col2:
        company = st.text_input("🏢 Enter the company name:")
    if st.button("🎯 Generate Mock Interview Questions"):
        if not job_title or not company:
            st.error("❌ Please enter both job title and company name.")
            return
        with st.spinner("⏳ Generating questions..."):
            prompt = f"""
            Generate a list of 50 interview questions along with their answers for the position of {job_title} at {company}. Each question should be followed by a concise and professional answer.
            """
            try:
                qa_text = llm.invoke(prompt).content.strip()
                # The model is expected to separate Q&A pairs by blank lines,
                # and question from answer by a single newline.
                qa_pairs = qa_text.split('\n\n')
                st.subheader("🗣️ Mock Interview Questions and Answers:")
                for idx, qa in enumerate(qa_pairs, 1):
                    if qa.strip():
                        parts = qa.split('\n', 1)
                        if len(parts) == 2:
                            question = parts[0].strip()
                            answer = parts[1].strip()
                            st.markdown(f"**Q{idx}: {question}**")
                            st.markdown(f"**A:** {answer}")
                            st.write("---")
            except Exception as e:
                st.error(f"❌ Error generating interview questions: {e}")
def personalized_learning_paths_module():
    """Render the Personalized Learning Paths page.

    Builds a learning plan for the given career goal and current skills,
    then looks up two long-form YouTube videos for each module of the plan.
    """
    st.header("📚 Personalized Learning Paths")
    st.write("Receive tailored learning plans to help you achieve your career goals, complemented with curated video resources.")
    goal_col, skills_col = st.columns(2)
    with goal_col:
        career_goal = st.text_input("🎯 Enter your career goal (e.g., Data Scientist):")
    with skills_col:
        current_skills = st.text_input("💡 Enter your current skills (comma-separated):")
    if not st.button("🚀 Generate Learning Path"):
        return
    if not career_goal or not current_skills:
        st.error("❌ Please enter both career goal and current skills.")
        return
    with st.spinner("🔄 Generating your personalized learning path..."):
        learning_path = generate_learning_path(career_goal, current_skills)
        if not learning_path:
            st.error("❌ Failed to generate learning path.")
            return
        st.subheader("📜 Your Personalized Learning Path:")
        st.write(learning_path)
        # The plan comes back numbered ("1. ...", "2. ..."); split it into
        # individual modules and drop empty fragments.
        modules = [part.strip() for part in re.split(r'\d+\.\s+', learning_path) if part.strip()]
        st.subheader("📹 Recommended YouTube Videos for Each Module:")
        for module in modules:
            video_urls = search_youtube_videos(query=module, max_results=2, video_duration="long")
            if video_urls:
                st.markdown(f"### {module}")
                embed_youtube_videos(video_urls, module)
            else:
                st.write(f"No videos found for **{module}**.")
def networking_opportunities_module():
    """Render the Networking Opportunities page.

    Asks the LLM to suggest LinkedIn groups, professional organizations,
    and industry events based on the user's skills and industry.
    """
    st.header("🤝 Networking Opportunities")
    st.write("Expand your professional network by connecting with relevant industry peers and groups.")
    skills_col, industry_col = st.columns(2)
    with skills_col:
        user_skills = st.text_input("💡 Enter your key skills (comma-separated):")
    with industry_col:
        industry = st.text_input("🏭 Enter your industry (e.g., Technology):")
    if not st.button("🔍 Find Networking Opportunities"):
        return
    if not user_skills or not industry:
        st.error("❌ Please enter both key skills and industry.")
        return
    with st.spinner("🔄 Fetching networking opportunities..."):
        # NOTE: the inner indentation of this prompt is intentional — it is
        # part of the literal text sent to the model.
        prompt = f"""
            Based on the following skills: {user_skills}, and industry: {industry}, suggest relevant LinkedIn groups, professional organizations, and industry events for networking.
            """
        try:
            suggestions = llm.invoke(prompt).content.strip()
            st.subheader("🔗 Recommended Networking Groups and Events:")
            st.write(suggestions)
        except Exception as e:
            st.error(f"❌ Error fetching networking opportunities: {e}")
def feedback_and_improvement_module():
    """Render the Feedback page.

    Collects name, email, feedback category, and message in a Streamlit
    form; validates that the free-text fields are filled before accepting.
    """
    st.header("🗣️ Feedback and Continuous Improvement")
    st.write("We value your feedback! Let us know how we can improve your experience.")
    with st.form("feedback_form"):
        name = st.text_input("👤 Your Name")
        email = st.text_input("📧 Your Email")
        feedback_type = st.selectbox("📂 Type of Feedback", ["Bug Report", "Feature Request", "General Feedback"])
        feedback = st.text_area("📝 Your Feedback")
        submitted = st.form_submit_button("✅ Submit")
        if submitted:
            if name and email and feedback:
                # Persisting the feedback (database / email) is a follow-up;
                # for now we only acknowledge the submission.
                st.success("✅ Thank you for your feedback!")
            else:
                st.error("❌ Please fill in all the fields.")
def resource_library_page():
    """Render the Resource Library page.

    Lists downloadable templates/guides and serves each via a download
    button; shows an error when a file is missing from the app directory.
    """
    st.header("📚 Resource Library")
    st.write("Access a collection of templates and guides to enhance your job search.")
    resources = (
        ("Resume Template", "A professional resume template in DOCX format.", "./resume_template.docx"),
        ("Cover Letter Template", "A customizable cover letter template.", "./cover_letter_template.docx"),
        ("Job Application Checklist", "A checklist to ensure you cover all steps.", "./application_checklist.pdf"),
    )
    for title, description, path in resources:
        st.markdown(f"### {title}")
        st.write(description)
        try:
            with open(path, "rb") as fh:
                st.download_button("⬇️ Download", data=fh, file_name=os.path.basename(path), mime="application/octet-stream")
        except FileNotFoundError:
            st.error(f"❌ File {path} not found. Please ensure the file is in the correct directory.")
        st.write("---")
def chatbot_support_page():
    """Render the AI chatbot support page.

    Keeps the conversation in ``st.session_state['chat_history']`` (a list
    of ``{"message": str, "is_user": bool}`` dicts), sends each user input
    to the LLM, and re-renders the whole history on every rerun.
    """
    st.header("🤖 AI-Powered Chatbot Support")
    st.write("Have questions or need assistance? Chat with our AI-powered assistant!")
    if 'chat_history' not in st.session_state:
        st.session_state['chat_history'] = []
    user_input = st.text_input("🗨️ You:", key="user_input")
    if st.button("Send"):
        if user_input:
            st.session_state['chat_history'].append({"message": user_input, "is_user": True})
            prompt = f"""
            You are a helpful assistant for a Job Application Assistant app. Answer the user's query based on the following context:
            {user_input}
            """
            try:
                response = llm.invoke(prompt)
                assistant_message = response.content.strip()
                st.session_state['chat_history'].append({"message": assistant_message, "is_user": False})
            except Exception as e:
                error_message = "❌ Sorry, I encountered an error while processing your request."
                st.session_state['chat_history'].append({"message": error_message, "is_user": False})
                st.error(f"❌ Error in chatbot: {e}")
    # streamlit_chat.message() derives its widget key from its arguments when
    # no key is given, so two identical messages in the history raised
    # DuplicateWidgetID. Pass a unique per-index key to fix that.
    for i, chat in enumerate(st.session_state['chat_history']):
        if chat['is_user']:
            message(chat['message'], is_user=True, avatar_style="thumbs", key=f"chat_msg_{i}")
        else:
            message(chat['message'], is_user=False, avatar_style="bottts", key=f"chat_msg_{i}")
def help_page():
    """Render the Help & FAQ page as a list of collapsible Q&A expanders."""
    st.header("❓ Help & FAQ")
    faqs = (
        ("🛠️ How do I generate a cover letter?",
         "Navigate to the **Cover Letter Generator** section, enter the job link, upload your resume, and click **Generate Cover Letter**."),
        ("📋 How do I track my applications?",
         "Use the **Application Tracking Dashboard** to add and manage your job applications."),
        ("📄 How can I optimize my resume?",
         "Upload your resume in the **Resume Analysis** section to extract skills and receive optimization suggestions."),
        ("📥 How do I import my applications?",
         "In the **Application Tracking Dashboard**, use the **Import Applications** section to upload a CSV file with the required columns."),
        ("🗣️ How do I provide feedback?",
         "Go to the **Feedback** section, fill out the form, and submit your feedback."),
    )
    for question, answer in faqs:
        with st.expander(question):
            st.write(answer)
# ------------------------------- | |
# Main Application | |
# ------------------------------- | |
def main_app():
    """Top-level app entry point.

    Applies global CSS, renders the sidebar navigation menu, and dispatches
    to the page function matching the selected menu item.
    """
    st.markdown(
        """
        <style>
        .reportview-container { background-color: #f5f5f5; }
        .sidebar .sidebar-content { background-image: linear-gradient(#2e7bcf, #2e7bcf); color: white; }
        </style>
        """,
        unsafe_allow_html=True
    )
    # Menu label -> page renderer. A single table replaces the former
    # 12-branch if/elif chain and keeps labels and handlers in sync:
    # the menu options below are derived from this dict's keys.
    pages = {
        "Email Generator": email_generator_page,
        "Cover Letter Generator": cover_letter_generator_page,
        "Resume Analysis": resume_analysis_page,
        "Application Tracking": application_tracking_dashboard,
        "Job Recommendations": job_recommendations_module,
        "Interview Preparation": interview_preparation_module,
        "Personalized Learning Paths": personalized_learning_paths_module,
        "Networking Opportunities": networking_opportunities_module,
        "Feedback": feedback_and_improvement_module,
        "Resource Library": resource_library_page,
        "Chatbot Support": chatbot_support_page,
        "Help": help_page,
    }
    with st.sidebar:
        selected = option_menu(
            menu_title="📂 Main Menu",
            options=list(pages.keys()),
            icons=[
                "envelope", "file-earmark-text", "file-person", "briefcase",
                "search", "microphone", "book", "people",
                "chat-left-text", "collection", "robot", "question-circle"
            ],
            menu_icon="cast",
            default_index=0,
            styles={
                "container": {"padding": "5!important", "background-color": "#2e7bcf"},
                "icon": {"color": "white", "font-size": "18px"},
                "nav-link": {"font-size": "16px", "text-align": "left", "margin": "0px", "--hover-color": "#6b9eff"},
                "nav-link-selected": {"background-color": "#1e5aab"},
            }
        )
    page = pages.get(selected)
    if page is not None:
        page()
if __name__ == "__main__":
    # Launch the Streamlit app only when this file is run directly,
    # not when it is imported as a module.
    main_app()